From ec97943683577867d7ddc86bef9b691b127c08ef Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 02:58:55 -0700 Subject: [PATCH 01/44] [Makefile] Add interface for providing more flags into the generator Signed-off-by: eop Chen --- rvv-intrinsic-generator/Makefile | 57 ++++++++++++-------------------- 1 file changed, 22 insertions(+), 35 deletions(-) diff --git a/rvv-intrinsic-generator/Makefile b/rvv-intrinsic-generator/Makefile index 5044f51ff..31de080e5 100644 --- a/rvv-intrinsic-generator/Makefile +++ b/rvv-intrinsic-generator/Makefile @@ -55,6 +55,8 @@ MAIN := rvv_intrinsic_gen.main BF16_INST := $(RVV_INTRINSIC_GEN_PATH)/bfloat16_inst.py # Script to clang-format the auto-generated adoc files CLANG_FORMAT_ADOC = clang_format_autogen +# Extra flags specified when calling rvv_intrinsic_gen.main +EXTRA_FLAG := # Main output directory is default to auto-generated OUTPUT_DIR := ../auto-generated # Derives output directory for each set of intrinsics @@ -164,50 +166,38 @@ gen-gnu-test: gnu-overloaded-test gnu-non-overloaded-test # Generate all-in-one document for non-overloaded intrinsics non-overloaded-doc: - $(call gen_doc,$(DIR),intrinsic_funcs.adoc,$@,) - $(call gen_doc,$(POLICY_DIR),intrinsic_funcs.adoc,$@,--has-policy) - $(call clang_format_adoc, --file, $(DIR)/intrinsic_funcs.adoc) - $(call clang_format_adoc, --file, $(POLICY_DIR)/intrinsic_funcs.adoc) + $(call gen_doc,$(DIR),intrinsic_funcs.md,$@,$(EXTRA_FLAG)) + $(call gen_doc,$(POLICY_DIR),intrinsic_funcs.md,$@,--has-policy $(EXTRA_FLAG)) # Generate grouped documents for non-overloaded intrinsics non-overloaded-docs: - $(call gen_docs,$(DIR),intrinsic_funcs,$@,) - $(call gen_docs,$(POLICY_DIR),intrinsic_funcs,$@,--has-policy) - $(call clang_format_adoc, --folder, $(DIR)/intrinsic_funcs) - $(call clang_format_adoc, --folder, $(POLICY_DIR)/intrinsic_funcs) + $(call gen_docs,$(DIR),intrinsic_funcs,$@,$(EXTRA_FLAG)) + $(call gen_docs,$(POLICY_DIR),intrinsic_funcs,$@,--has-policy $(EXTRA_FLAG)) # Generate all-in-one document for overloaded intrinsics overloaded-doc: - $(call gen_doc,$(DIR),overloaded_intrinsic_funcs.adoc,$@,) - $(call gen_doc,$(POLICY_DIR),overloaded_intrinsic_funcs.adoc,$@,--has-policy) - $(call clang_format_adoc, --file, $(DIR)/overloaded_intrinsic_funcs.adoc) - $(call clang_format_adoc, --file, $(POLICY_DIR)/overloaded_intrinsic_funcs.adoc) + $(call gen_doc,$(DIR),overloaded_intrinsic_funcs.md,$@,$(EXTRA_FLAG)) + $(call gen_doc,$(POLICY_DIR),overloaded_intrinsic_funcs.md,$@,--has-policy $(EXTRA_FLAG)) # Generate grouped documents for overloaded intrinsics overloaded-docs: - $(call gen_docs,$(DIR),overloaded_intrinsic_funcs,$@,) - $(call gen_docs,$(POLICY_DIR),overloaded_intrinsic_funcs,$@,--has-policy) - $(call clang_format_adoc, --folder, $(DIR)/overloaded_intrinsic_funcs) - $(call clang_format_adoc, --folder, $(POLICY_DIR)/overloaded_intrinsic_funcs) + $(call gen_docs,$(DIR),overloaded_intrinsic_funcs,$@,$(EXTRA_FLAG)) + $(call gen_docs,$(POLICY_DIR),overloaded_intrinsic_funcs,$@,--has-policy $(EXTRA_FLAG)) # Generate non-overloaded intrinsic testing C source files non-overloaded-test: - $(call gen_tests,$(DIR)/api-testing,non-overloaded-test,) - $(call gen_tests,$(POLICY_DIR)/api-testing,non-overloaded-test,--has-policy) - clang-format -i $(DIR)/api-testing/* - clang-format -i $(POLICY_DIR)/api-testing/* + $(call gen_tests,$(DIR)/api-testing,non-overloaded-test,$(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/api-testing,non-overloaded-test,--has-policy $(EXTRA_FLAG)) # Generate overloaded intrinsic testing 
C source files overloaded-test: - $(call gen_tests,$(DIR)/overloaded-api-testing,overloaded-test,) - $(call gen_tests,$(POLICY_DIR)/overloaded-api-testing,overloaded-test,--has-policy) - clang-format -i $(DIR)/overloaded-api-testing/* - clang-format -i $(POLICY_DIR)/overloaded-api-testing/* + $(call gen_tests,$(DIR)/overloaded-api-testing,overloaded-test,$(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/overloaded-api-testing,overloaded-test,--has-policy $(EXTRA_FLAG)) # Generate non-overloaded intrinsic testing C source files llvm-non-overloaded-test: - $(call gen_tests,$(DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm) - $(call gen_tests,$(POLICY_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --has-policy) + $(call gen_tests,$(DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm $(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --has-policy $(EXTRA_FLAG)) $(call replace_float, $(DIR)/llvm-api-tests) $(call replace_float, $(POLICY_DIR)/llvm-api-tests) clang-format -i $(DIR)/llvm-api-tests/* @@ -215,8 +205,8 @@ llvm-non-overloaded-test: # Generate overloaded intrinsic testing C source files llvm-overloaded-test: - $(call gen_tests,$(DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm) - $(call gen_tests,$(POLICY_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --has-policy) + $(call gen_tests,$(DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm $(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --has-policy $(EXTRA_FLAG)) $(call replace_float, $(DIR)/llvm-overloaded-tests) $(call replace_float, $(POLICY_DIR)/llvm-overloaded-tests) clang-format -i $(DIR)/llvm-overloaded-tests/* @@ -292,18 +282,15 @@ bf16-llvm-overloaded-test: # Generate the adaptor header for v0.10 non-policy-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,non-policy.h,non-overloaded-compatible-header,) + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,non-policy.h,non-overloaded-compatible-header,$(EXTRA_FLAG)) policy-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,policy.h,non-overloaded-compatible-header,--has-policy) - clang-format -i $(DIR)/rvv-v0p10-compatible-headers/* + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,policy.h,non-overloaded-compatible-header,--has-policy $(EXTRA_FLAG)) non-policy-overloaded-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-non-policy.h,overloaded-compatible-header,) - clang-format -i $(DIR)/rvv-v0p10-compatible-headers/* + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-non-policy.h,overloaded-compatible-header,$(EXTRA_FLAG)) policy-overloaded-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-policy.h,overloaded-compatible-header,--has-policy) - clang-format -i $(DIR)/rvv-v0p10-compatible-headers/* + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-policy.h,overloaded-compatible-header,--has-policy $(EXTRA_FLAG)) ############################################################################### From 1a2014c2ba3c1e3179c07129a946e45a406f1b5a Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 03:36:39 -0700 Subject: [PATCH 02/44] [vector-crypto] Define intrinsics for the Zvbb extension Signed-off-by: eop Chen --- .../rvv_intrinsic_gen/constants.py | 1 + .../rvv_intrinsic_gen/main.py | 18 ++- .../templates/vector_crypto_template.py | 104 
++++++++++++++++++ .../rvv_intrinsic_gen/vector_crypto_inst.py | 65 +++++++++++ 4 files changed, 186 insertions(+), 2 deletions(-) create mode 100644 rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py create mode 100644 rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py index e2ae21964..5d3f20c6c 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py @@ -28,6 +28,7 @@ NSEWS = [16, 32, 64] TYPES = ["float", "int", "uint"] ITYPES = ["int", "uint"] +UITYPE = ["uint"] FTYPES = ["float"] MTYPES = ["bool"] MLENS = [1, 2, 4, 8, 16, 32, 64] diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py index f9b84daf1..fe0205d1b 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py @@ -24,6 +24,7 @@ import importlib.util import inspect import inst +import vector_crypto_inst import generator from enums import ToolChainType @@ -104,6 +105,7 @@ class GenTypes: parser.add_argument("--skip-default-inst", default=False, action="store_true") parser.add_argument("--vendor-generator-script") parser.add_argument("--vendor-generator-name") + parser.add_argument("--gen-vector-crypto", default=False, action="store_true") parser.add_argument("--out") args = parser.parse_args() @@ -137,6 +139,12 @@ class GenTypes: GenTypes.NON_OVERLOADED_COMPATIBLE_HEADER, GenTypes.OVERLOADED_COMPATIBLE_HEADER ]: + # Vector crypto does not need compatible header because we don't have + # them before v0.10 + if mode in (GenTypes.NON_OVERLOADED_COMPATIBLE_HEADER, + GenTypes.OVERLOADED_COMPATIBLE_HEADER) and\ + args.gen_vector_crypto: + return with open(args.out, "w", encoding="utf-8") as f: if mode == GenTypes.NON_OVERLOADED_DOC: g = generator.DocGenerator(f, True, args.has_policy) @@ -150,7 +158,10 @@ class GenTypes: assert False if not args.skip_default_inst: - inst.gen(g) + if args.gen_vector_crypto: + vector_crypto_inst.gen(g) + else: + inst.gen(g) else: print("Skipping default RVV instructions (--skip-default-inst)") if vendor_gen is not None: @@ -173,7 +184,10 @@ class GenTypes: else: assert False if not args.skip_default_inst: - inst.gen(g) + if args.gen_vector_crypto: + vector_crypto_inst.gen(g) + else: + inst.gen(g) else: print("Skipping default RVV instructions (--skip-default-inst)") if vendor_gen is not None: diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py new file mode 100644 index 000000000..9574bbbb8 --- /dev/null +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -0,0 +1,104 @@ +""" +Template for rendering vector crypto intrinsics. +Current version is for v20230531. 
+https://github.com/riscv/riscv-crypto/blob/v20230531/doc/vector/riscv-crypto-spec-vector.adoc +""" + +from utils import prod +from utils import TypeHelper +from enums import InstInfo +from enums import InstType +from enums import ExtraAttr + +operand_mnemonic_dict = {} +# Zvbb: Vector Bit-manipulation used in Cryptography +operand_mnemonic_dict["vandn"] = ["vv", "vx"] +operand_mnemonic_dict["vbrev"] = ["v"] +operand_mnemonic_dict["vbrev8"] = ["v"] +operand_mnemonic_dict["vrev8"] = ["v"] +operand_mnemonic_dict["vclz"] = ["v"] +operand_mnemonic_dict["vctz"] = ["v"] +operand_mnemonic_dict["vcpop"] = ["v"] +operand_mnemonic_dict["vrol"] = ["vv", "vx"] +operand_mnemonic_dict["vror"] = ["vv", "vx"] # saving the `vi` variant +operand_mnemonic_dict["vwsll"] = ["vv", "vx"] # saving the `vi` variant + + +def has_vs1_input(name): + has_vs1_input_inst_set = {"vandn", "vrol", "vror", "vwsll"} + + return name in has_vs1_input_inst_set + + +def has_rs1_input(name): + has_rs1_input_inst_set = {"vandn", "vrol", "vror", "vwsll"} + + return name in has_rs1_input_inst_set + + +def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): + #pylint: disable=invalid-name + # FIXME: Renaming 'G' to 'g' all in once later. + G.inst_group_prologue() + + for decorator in decorator_list: + decorator.write_text_header(G) + for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): + op = args["OP"] + for operand_mnemonic in operand_mnemonic_dict[op]: + if operand_mnemonic in ("vv", "vs"): + if op == "vwsll": + inst_info = InstInfo.get(args, decorator, InstType.WVV, + ExtraAttr.NO_ATTR) + else: + inst_info = InstInfo.get(args, decorator, InstType.VV, + ExtraAttr.NO_ATTR) + elif operand_mnemonic == "vx": + if op == "vwsll": + inst_info = InstInfo.get(args, decorator, InstType.WVX, + ExtraAttr.NO_ATTR) + else: + inst_info = InstInfo.get(args, decorator, InstType.VX, + ExtraAttr.NO_ATTR) + elif operand_mnemonic == "v": + inst_info = InstInfo.get(args, decorator, InstType.V, + ExtraAttr.NO_ATTR) + else: + assert False, "Unreachable, unrecognized mnemonic" + + args["MNEMONIC"] = operand_mnemonic + type_helper = TypeHelper(**args) + kwargs = {} + if op == "vwsll": + kwargs["return_type"] = type_helper.wv + else: + kwargs["return_type"] = type_helper.v + kwargs = {**kwargs, **decorator.mask_args(type_helper.m, type_helper.v)} + if op == "vwsll": + kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.wv)} + else: + kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.v)} + + kwargs["vs2"] = type_helper.v + + if operand_mnemonic == "vv" and has_vs1_input(op): + kwargs["vs1"] = type_helper.v + if operand_mnemonic == "vx" and has_rs1_input(op): + if op in ["vwsll", "vrol", "vror"]: + kwargs["rs1"] = type_helper.size_t + else: + kwargs["rs1"] = type_helper.s + + kwargs["vl"] = type_helper.size_t + + if op == "vwsll": + args["SEW"] = args["WSEW"] + args["LMUL"] = args["WLMUL"] + + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) + + G.inst_group_epilogue() diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py new file mode 100644 index 000000000..5cfe60263 --- /dev/null +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -0,0 +1,65 @@ +""" +Declares the vector crypto intrinsics through the vector crypto template. 
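As a quick illustration of what the template above ultimately produces, here is a minimal hand-written C sketch (editorial, not part of the generated output) showing the expected call shape of two Zvbb intrinsics. It assumes the intrinsics are exposed through `<riscv_vector.h>` once Zvbb is enabled; the function names are placeholders, and the widening `vwsll` case shows why `render()` swaps in `WSEW`/`WLMUL` for the return type.

``` C
#include <riscv_vector.h>

// vandn.vv: vd[i] = vs2[i] & ~vs1[i].
vuint8m1_t example_andn(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  return __riscv_vandn_vv_u8m1(vs2, vs1, vl);
}

// vwsll.vv widens: u8 (LMUL=1) operands yield a u16 (LMUL=2) result, which is
// why the template substitutes WSEW/WLMUL when op == "vwsll".
vuint16m2_t example_wsll(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  return __riscv_vwsll_vv_u16m2(vs2, vs1, vl);
}
```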
+""" + +from intrinsic_decorator import IntrinsicDecorators +from templates import vector_crypto_template +from constants import LMULS, WLMULS, SEWS, WSEWS, UITYPE + + +def gen(g): + decorators = IntrinsicDecorators(g.has_tail_policy) + + g.start_group("Zvbb - Vector Bit-manipulation used in Cryptography") + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Bitwise And-Not", + "", # FIXME: We probably have a separate document for vector-crypto + ["vandn"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Reverse Bits", + "", # FIXME: We probably have a separate document for vector-crypto + ["vbrev", "vbrev8", "vrev8"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Count Bits", + "", # FIXME: We probably have a separate document for vector-crypto + ["vclz", "vctz"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_no_maskedoff) + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Rotate", + "", # FIXME: We probably have a separate document for vector-crypto + ["vrol", "vror"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Shift", + "", # FIXME: We probably have a separate document for vector-crypto + ["vwsll"], + UITYPE, + WSEWS, + WLMULS, + decorators.has_masking_maskedoff_policy) + + #################################################################### From 9785c5f6bb9ef0de962bdaacd6dc4af79545906a Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 10:59:47 -0700 Subject: [PATCH 03/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. 
(make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 581 +++++++++++ ...r_bit-manipulation_used_in_cryptography.md | 581 +++++++++++ .../overloaded_intrinsic_funcs.md | 581 +++++++++++ ...r_bit-manipulation_used_in_cryptography.md | 581 +++++++++++ .../policy_funcs/intrinsic_funcs.md | 953 ++++++++++++++++++ ...r_bit-manipulation_used_in_cryptography.md | 953 ++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 953 ++++++++++++++++++ ...r_bit-manipulation_used_in_cryptography.md | 953 ++++++++++++++++++ 8 files changed, 6136 insertions(+) create mode 100644 auto-generated/vector-crypto/intrinsic_funcs.md create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md new file mode 100644 index 000000000..80f99bfc5 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -0,0 +1,581 @@ + +## Zvbb - Vector Bit-manipulation used in Cryptography: + +### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vandn_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8 (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4 (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2 (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1 (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2 (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4 (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8 (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4 (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2 (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1 (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2 
(vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4 (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8 (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2 (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1 (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2 (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4 (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8 (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t 
__riscv_vandn_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vbrev_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4 (vuint16m4_t vs2, size_t vl); 
+vuint16m8_t __riscv_vbrev_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions 
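// Illustrative usage sketch (editorial note, not auto-generated output):
// byte-swap the active elements of a u32m1 vector with the masked vrev8
// variant listed below. Assumes these intrinsics are exposed through
// <riscv_vector.h> when Zvbb is enabled; the helper name is a placeholder.
static inline vuint32m1_t example_bswap_u32m1_m(vbool32_t mask, vuint32m1_t vs2,
                                                size_t vl) {
  return __riscv_vrev8_v_u32m1_m(mask, vs2, vl);
}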
+vuint8mf8_t __riscv_vbrev_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, 
size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Count Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vclz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t 
__riscv_vctz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t 
vl); +vuint8m8_t __riscv_vctz_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Rotate](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vrol_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); 
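// Illustrative usage sketch (editorial note, not auto-generated output):
// rotate each 32-bit element left by a fixed amount, e.g. the rotate-by-7
// step of a ChaCha-style round. Assumes <riscv_vector.h> exposes these
// intrinsics when Zvbb is enabled; the helper name is a placeholder.
static inline vuint32m1_t example_rotl7_u32m1(vuint32m1_t vs2, size_t vl) {
  return __riscv_vrol_vx_u32m1(vs2, 7, vl);
}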
+vuint32mf2_t __riscv_vrol_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2 (vuint32mf2_t vs2, size_t 
rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t 
__riscv_vrol_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, 
size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Shift](): + +**Prototypes:** +``` C +vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4 (vuint8m2_t vs2, 
vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t 
__riscv_vwsll_vv_u32m4_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md new file mode 100644 index 000000000..80f99bfc5 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -0,0 +1,581 @@ + +## Zvbb - Vector Bit-manipulation used in Cryptography: + +### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vandn_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8 (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4 (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2 (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1 (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2 (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4 (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8 (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4 (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2 (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1 (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2 (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4 
(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4 (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8 (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2 (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1 (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2 (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4 (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8 (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl); 
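//
// Editorial usage sketch (not part of the generated listing): clearing a fixed
// bit pattern with the vandn intrinsics listed in this block. Assumes
// <riscv_vector.h>, a toolchain that accepts the Zvbb intrinsics, and the usual
// and-not semantics (result = vs2 & ~rs1); the helper name and loop are
// illustrative only.
static inline void clear_bits_u32(uint32_t *dst, const uint32_t *src,
                                  uint32_t bits_to_clear, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e32m1(n);              // strip-mine the loop
    vuint32m1_t v = __riscv_vle32_v_u32m1(src, vl);   // load vl elements
    vuint32m1_t r = __riscv_vandn_vx_u32m1(v, bits_to_clear, vl); // v & ~bits
    __riscv_vse32_v_u32m1(dst, r, vl);                // store vl elements
    src += vl; dst += vl; n -= vl;
  }
}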
+vuint16m1_t __riscv_vandn_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vbrev_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t 
__riscv_vbrev_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); 
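//
// Editorial usage sketch (not part of the generated listing): vrev8 reverses
// the bytes inside each element, which gives the common endianness-swap idiom
// over a buffer. Assumes <riscv_vector.h> plus Zvbb support; the function name
// is hypothetical.
static inline void bswap32_buffer(uint32_t *dst, const uint32_t *src, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e32m1(n);
    vuint32m1_t v = __riscv_vle32_v_u32m1(src, vl);
    // Byte-reverse every 32-bit element in one instruction.
    vuint32m1_t swapped = __riscv_vrev8_v_u32m1(v, vl);
    __riscv_vse32_v_u32m1(dst, swapped, vl);
    src += vl; dst += vl; n -= vl;
  }
}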
+vuint8mf4_t __riscv_vbrev_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, 
size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Count Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vclz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1 
(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); 
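//
// Editorial usage sketch (not part of the generated listing): per-element
// floor(log2(x)) for non-zero inputs via vclz, i.e. 31 - clz(x) for 32-bit
// elements. Assumes <riscv_vector.h> plus Zvbb support; names are illustrative.
static inline void ilog2_u32(uint32_t *dst, const uint32_t *src, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e32m1(n);
    vuint32m1_t v = __riscv_vle32_v_u32m1(src, vl);
    vuint32m1_t lz = __riscv_vclz_v_u32m1(v, vl);        // leading-zero count
    vuint32m1_t lg = __riscv_vrsub_vx_u32m1(lz, 31, vl); // 31 - clz(x)
    __riscv_vse32_v_u32m1(dst, lg, vl);
    src += vl; dst += vl; n -= vl;
  }
}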
+vuint16mf4_t __riscv_vctz_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Rotate](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vrol_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); 
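//
// Editorial usage sketch (not part of the generated listing): a fixed-amount
// rotate-left over a block of 32-bit words, the building block of ARX ciphers
// such as ChaCha20. Assumes <riscv_vector.h> plus Zvbb support; names are
// illustrative only.
static inline void rotl_block_u32(uint32_t *words, size_t n, size_t amount) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e32m1(n);
    vuint32m1_t v = __riscv_vle32_v_u32m1(words, vl);
    // Rotate every element left by the same scalar amount.
    v = __riscv_vrol_vx_u32m1(v, amount, vl);
    __riscv_vse32_v_u32m1(words, v, vl);
    words += vl; n -= vl;
  }
}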
+vuint32m1_t __riscv_vrol_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1 (vuint32m1_t vs2, 
vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); 
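//
// Editorial usage sketch (not part of the generated listing): the masked
// variants take a vbool mask as the first operand, so a rotate can be applied
// to selected elements only. Paired with a masked store, elements whose mask
// bit is clear are left untouched in memory, so the mask-agnostic behaviour of
// the inactive result lanes never matters here. Assumes <riscv_vector.h> plus
// Zvbb support and a caller-supplied mask; names are illustrative only.
static inline void rotr_selected_u32(uint32_t *words, vbool32_t mask,
                                     size_t amount, size_t vl) {
  vuint32m1_t v = __riscv_vle32_v_u32m1(words, vl);
  // Rotate right only where the mask is set; inactive lanes are not consumed.
  vuint32m1_t r = __riscv_vror_vx_u32m1_m(mask, v, amount, vl);
  // Masked store writes back just the rotated (active) elements.
  __riscv_vse32_v_u32m1_m(mask, words, r, vl);
}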
+vuint16m8_t __riscv_vrol_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, 
vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Shift](): + +**Prototypes:** +``` C +vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint16m4_t vs2, size_t rs1, 
size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t 
__riscv_vwsll_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md new file mode 100644 index 000000000..d4d9ea35a --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -0,0 +1,581 @@ + +## Zvbb - Vector Bit-manipulation used in Cryptography: + +### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn 
(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn 
(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vbrev (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 
(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vbool32_t mask, vuint8mf4_t 
vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vbool8_t mask, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Count Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vclz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vuint8m8_t vs2, 
size_t vl); +vuint16mf4_t __riscv_vclz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t 
__riscv_vclz (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vbool8_t mask, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Rotate](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); 
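// Editor's illustrative sketch (not generator output): a strip-mined loop that
// rotates each 32-bit element left by a runtime amount using the overloaded
// __riscv_vrol listed above, together with the base RVV load/store/vsetvl
// intrinsics. The helper name rol32_buffer and the buffer-processing scenario
// are assumptions for illustration; a toolchain exposing the Zvbb intrinsics
// (e.g. an -march string containing "zvbb") is assumed.
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

static void rol32_buffer(uint32_t *dst, const uint32_t *src, size_t n, size_t amt) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e32m1(n);                    // elements processed this iteration
    vuint32m1_t v = __riscv_vle32_v_u32m1(src, vl);  // load one strip of inputs
    v = __riscv_vrol(v, amt, vl);                    // rotate each element left by amt
    __riscv_vse32_v_u32m1(dst, v, vl);               // store the rotated strip
  }
}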
+vuint16m8_t __riscv_vrol (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror 
(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t mask, vuint32m2_t vs2, 
vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); 
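// Editor's illustrative sketch (not generator output), using the same headers
// as the rotate sketch above: the masked forms take the mask as the first
// argument. A mask is built with the base RVV compare intrinsic
// __riscv_vmsne_vx_u32m1_b32 and passed to the masked overloaded __riscv_vror,
// so only active elements are rotated; what the inactive result elements hold
// follows the intrinsics' default mask policy and is not relied upon here.
// The helper name and the "rotate only non-zero words" scenario are assumptions.
static vuint32m1_t ror_nonzero(vuint32m1_t v, size_t amt, size_t vl) {
  vbool32_t nz = __riscv_vmsne_vx_u32m1_b32(v, 0, vl);  // mask bit set where v[i] != 0
  return __riscv_vror(nz, v, amt, vl);                  // masked overloaded rotate-right
}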
+vuint32m1_t __riscv_vror (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Shift](): + +**Prototypes:** +``` C +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t 
vl); +vuint64m8_t __riscv_vwsll (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md new file mode 100644 index 000000000..d4d9ea35a --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -0,0 +1,581 @@ + +## Zvbb - Vector Bit-manipulation used in Cryptography: + +### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); 
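// Editor's illustrative sketch (not generator output): clearing a fixed set of
// flag bits in every element with the overloaded vector-scalar __riscv_vandn,
// assuming the usual Zvbb vandn semantics (vs2 AND-ed with the complement of
// the scalar rs1). The helper name and the flag-clearing scenario are
// illustrative only; a Zvbb-capable toolchain is assumed.
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

static void clear_flags_u32(uint32_t *buf, size_t n, uint32_t drop) {
  for (size_t vl; n > 0; n -= vl, buf += vl) {
    vl = __riscv_vsetvl_e32m1(n);
    vuint32m1_t v = __riscv_vle32_v_u32m1(buf, vl);
    __riscv_vse32_v_u32m1(buf, __riscv_vandn(v, drop, vl), vl);  // v & ~drop
  }
}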
+vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t mask, 
vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vbrev (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vuint8mf2_t vs2, 
size_t vl); +vuint8m1_t __riscv_vbrev (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vuint64m4_t vs2, 
size_t vl); +vuint64m8_t __riscv_vrev8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vbool8_t 
mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vbool8_t mask, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Count Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vclz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vuint32m2_t vs2, 
size_t vl); +vuint32m4_t __riscv_vctz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vbool8_t mask, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vbool64_t mask, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vbool32_t mask, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vbool16_t mask, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vbool8_t mask, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vbool4_t mask, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vbool2_t mask, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vbool1_t mask, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vbool64_t mask, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vbool32_t mask, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vbool16_t mask, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vbool8_t mask, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vbool4_t mask, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vbool2_t mask, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vbool64_t mask, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vbool32_t mask, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vbool16_t mask, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vbool8_t mask, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vbool4_t mask, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vbool64_t mask, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vbool32_t mask, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vbool16_t mask, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vbool8_t mask, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used 
in Cryptography - Rotate](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, 
size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t mask, vuint8m2_t vs2, size_t 
rs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); 
+vuint8m1_t __riscv_vror (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Shift](): + +**Prototypes:** +``` C +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, 
size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t 
vl); +vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md new file mode 100644 index 000000000..f5ef93699 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -0,0 +1,953 @@ + +## Zvbb - Vector Bit-manipulation used in Cryptography: + +### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vandn_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t 
__riscv_vandn_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, 
uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tum (vbool64_t mask, 
vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, 
size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t 
__riscv_vandn_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t 
__riscv_vandn_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vbrev_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu (vuint8mf2_t 
maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tu 
(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t 
maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tum (vbool32_t mask, 
vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t 
__riscv_vbrev8_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tumu (vbool64_t mask, vuint64m1_t 
maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, 
vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, 
size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Count Bits](): +This operation does not have Policy Intrinsic Functions. + +### [Vector Bit-manipulation used in Cryptography - Rotate](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vrol_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tu (vuint32m1_t
maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t 
__riscv_vror_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tum (vbool8_t mask, 
vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, 
vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, 
size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, 
vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, 
vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, 
vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_mu 
(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t 
vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t 
__riscv_vror_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Shift](): + +**Prototypes:** +``` C +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); 
+vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); 
+vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t mask, 
vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t 
vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md 
b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md new file mode 100644 index 000000000..f5ef93699 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -0,0 +1,953 @@ + +## Zvbb - Vector Bit-manipulation used in Cryptography: + +### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vandn_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, 
vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tum (vbool64_t mask, 
vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t 
vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tumu 
(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); 
+vuint8m8_t __riscv_vandn_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, 
vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vbrev_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tu (vuint16m8_t maskedoff, 
vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tum 
(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t 
__riscv_vbrev8_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); 
+vuint8m8_t __riscv_vbrev_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tumu (vbool8_t mask, 
vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t 
__riscv_vbrev_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vbrev8_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Count Bits](): +This operation does not have policy intrinsic functions.
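
As a rough illustration of how the policy suffixes listed above are combined in practice, the following sketch (editorial, not generator output) chains a tail-undisturbed `vandn` with a tail- and mask-undisturbed `vrev8`. It assumes a toolchain that exposes these Zvbb intrinsics through `<riscv_vector.h>` with the extension enabled (e.g. `-march=rv64gcv_zvbb`); the function and buffer names are illustrative only.

``` C
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void zvbb_policy_demo(uint32_t *data, const uint32_t *key, size_t n) {
  size_t vl;
  for (; n > 0; n -= vl, data += vl, key += vl) {
    vl = __riscv_vsetvl_e32m1(n);
    vuint32m1_t vd = __riscv_vle32_v_u32m1(data, vl);
    vuint32m1_t vk = __riscv_vle32_v_u32m1(key, vl);

    // _tu (tail undisturbed): the first vl elements receive the and-not
    // result; tail elements keep the value of the maskedoff operand (vd).
    vuint32m1_t t = __riscv_vandn_vv_u32m1_tu(vd, vd, vk, vl);

    // _tum (tail and mask undisturbed): only elements whose mask bit is set
    // are byte-reversed; inactive and tail elements keep the corresponding
    // element of the maskedoff operand (t).
    vbool32_t m = __riscv_vmsne_vx_u32m1_b32(vk, 0, vl);
    vuint32m1_t r = __riscv_vrev8_v_u32m1_tum(m, t, t, vl);

    __riscv_vse32_v_u32m1(data, r, vl);
  }
}
```

In every policy variant the `maskedoff` argument supplies the values of the undisturbed elements, so passing a previous result (as with `t` above) carries those elements unchanged through the sequence.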
+ +### [Vector Bit-manipulation used in Cryptography - Rotate](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vrol_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, 
size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, 
vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, 
vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, 
vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, 
size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, 
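+// Usage sketch (illustrative; assumes a toolchain with Zvbb enabled and
+// <riscv_vector.h> included, with caller-provided `mask`, `dest`, `vs2`, `vl`):
+// the `_tum` variants above take the mask first and a `maskedoff` destination
+// operand; tail elements are kept from `maskedoff` (tail undisturbed) while
+// inactive elements are mask-agnostic. For example, with vbool32_t `mask` and
+// vuint32m1_t `dest`/`vs2`:
+//   vuint32m1_t r = __riscv_vrol_vx_u32m1_tum(mask, dest, vs2, 3, vl);
+// The `_tumu` variants that follow additionally keep inactive elements from
+// `maskedoff` (mask undisturbed).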
size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t 
maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_mu (vbool4_t 
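+// Usage sketch (illustrative; caller-provided operands are assumed): the `_mu`
+// variants keep inactive elements from `maskedoff` (mask undisturbed) but
+// leave the tail agnostic, in contrast to `_tumu` above, which also keeps the
+// tail. For example, with vbool64_t `mask` and vuint64m1_t `dest`/`vs2`:
+//   vuint64m1_t r = __riscv_vror_vx_u64m1_mu(mask, dest, vs2, 13, vl);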
mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t 
vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t 
__riscv_vror_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Shift](): + +**Prototypes:** +``` C +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t 
maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t 
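+// Usage sketch (illustrative): vwsll is a widening shift, so the result EEW is
+// twice the source EEW; the `_vv_u16m1_tu` form above takes u8mf2 sources and
+// returns u16m1. Assuming caller-provided `dest16` (vuint16m1_t), `data8` and
+// `shamt8` (vuint8mf2_t), and `vl`:
+//   vuint16m1_t w = __riscv_vwsll_vv_u16m1_tu(dest16, data8, shamt8, vl);
+// Tail elements are taken from `dest16` because of the `_tu` policy.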
vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t 
mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, 
vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md new file mode 100644 index 000000000..c94663c42 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -0,0 +1,953 @@ + +## Zvbb - Vector Bit-manipulation used in Cryptography: + +### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu 
(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t 
maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t 
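+// Usage sketch (illustrative): these are the overloaded spellings, so the
+// element type and LMUL are deduced from the argument types. For example, with
+// vuint32m2_t `dest`/`vs2` and a uint32_t scalar (names assumed here):
+//   vuint32m2_t r = __riscv_vandn_tu(dest, vs2, (uint32_t)0x00ff00ff, vl);
+// resolves to the same operation as the corresponding non-overloaded
+// __riscv_vandn_vx_u32m2_tu variant. The masked forms (`_tum`, `_tumu`, `_mu`)
+// simply prepend the vbool mask argument.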
__riscv_vandn_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, 
size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t 
mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t mask, 
vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vbrev_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t 
vl); +vuint8mf4_t __riscv_vbrev8_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tu (vuint64m8_t 
maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); 
+vuint32mf2_t __riscv_vbrev8_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tumu 
(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tumu (vbool4_t mask, 
vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t 
vs2, size_t vl); +vuint16m1_t __riscv_vbrev_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_mu (vbool64_t mask, vuint8mf8_t 
maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Count Bits](): +This operation does not have Policy Intrinsic Functions.
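Before the rotate intrinsics below, a minimal usage sketch may help show how the policy variants listed above compose with the base RVV intrinsics. This sketch is illustrative only and is not part of the auto-generated listing; it assumes a toolchain that provides the proposed Zvbb overloaded policy intrinsics such as `__riscv_vrev8_tu` together with the standard `__riscv_vsetvl`/`vle`/`vse` intrinsics, and the helper name `rev8_words_tu` is hypothetical.

``` C
// Illustrative sketch (hypothetical helper): byte-swap n 32-bit words with
// the overloaded __riscv_vrev8_tu policy intrinsic from the listing above.
// The tail-undisturbed (_tu) policy keeps elements past vl equal to the
// value already held in the destination operand vd.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void rev8_words_tu(uint32_t *dst, const uint32_t *src, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vuint32m1_t vd  = __riscv_vle32_v_u32m1(dst + i, vl); // current destination contents (maskedoff operand)
    vuint32m1_t vs2 = __riscv_vle32_v_u32m1(src + i, vl);
    vd = __riscv_vrev8_tu(vd, vs2, vl);                   // tail elements beyond vl stay as loaded from dst
    __riscv_vse32_v_u32m1(dst + i, vd, vl);
    i += vl;
  }
}
```

In this sketch the load of `dst` supplies the `maskedoff` operand, so elements beyond `vl` are written back unchanged; the same pattern applies to the other `_tu`, `_tum`, `_tumu`, and `_mu` variants in this document.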
+ +### [Vector Bit-manipulation used in Cryptography - Rotate](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t 
vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t 
vs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t mask, 
vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t 
vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tum 
(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t mask, 
vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t mask, vuint8m4_t maskedoff, 
vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t mask, vuint64m8_t maskedoff, 
vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t 
__riscv_vrol_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t 
vl); +vuint16m1_t __riscv_vror_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Shift](): + +**Prototypes:** +``` C +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu 
(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, 
size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint8m4_t 
maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu 
(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md new file mode 100644 index 000000000..c94663c42 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -0,0 +1,953 @@ + +## Zvbb - Vector Bit-manipulation used in Cryptography: + +### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); 
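+// Illustrative usage (editor's note, not emitted by the generator; assumes the
+// variables are already declared with the types shown above): with the
+// tail-undisturbed (tu) policy, elements at index >= vl keep their values from
+// `maskedoff`.
+//   vuint8m1_t vv = __riscv_vandn_tu(maskedoff, vs2, vs1, vl); // vector-vector form
+//   vuint8m1_t vx = __riscv_vandn_tu(maskedoff, vs2, rs1, vl); // vector-scalar form, rs1 is uint8_t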
+vuint8m2_t __riscv_vandn_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked 
functions +vuint8mf8_t __riscv_vandn_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, 
size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t mask, vuint16mf2_t maskedoff, 
vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); 
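+// Illustrative usage (editor's note, not emitted by the generator): the mu
+// variants are masked and mask-undisturbed, so inactive (masked-off) elements
+// keep their values from `maskedoff`.
+//   vuint8mf4_t r = __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); // rs1 is uint8_t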
+vuint8mf2_t __riscv_vandn_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_mu 
(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vbrev_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tu (vuint16mf4_t maskedoff, 
vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); 
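+// Illustrative usage (editor's note, not emitted by the generator): the tum
+// variants are masked and tail-undisturbed, so tail elements (index >= vl)
+// keep their values from `maskedoff`.
+//   vuint8m2_t r = __riscv_vbrev_tum(mask, maskedoff, vs2, vl);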
+vuint8m4_t __riscv_vbrev_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tum (vbool64_t mask, 
vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, 
size_t vl); +vuint16m2_t __riscv_vbrev_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); 
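+// Illustrative usage (editor's note, not emitted by the generator): the tumu
+// variants keep both tail elements and masked-off elements from `maskedoff`.
+//   vuint8mf8_t r = __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);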
+vuint8mf4_t __riscv_vrev8_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_mu (vbool32_t mask, 
vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_mu (vbool1_t 
mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Count Bits](): +These operations do not have Policy Intrinsic Functions. + +### [Vector Bit-manipulation used in Cryptography - Rotate](): + +**Prototypes:** +``` C +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t maskedoff, vuint16m1_t
vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t maskedoff, 
vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t mask, 
vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, 
size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t mask, 
vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t mask, vuint8m2_t 
maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t mask, vuint64m4_t maskedoff, 
vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t 
rs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu 
(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, 
vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t mask, vuint32m8_t 
maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +``` + +### [Vector Bit-manipulation used in Cryptography - Shift](): + +**Prototypes:** +``` C +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu 
(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); 
+vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, 
vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, 
vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +``` From 97cfa6493b60a6ce7a49bd72118af6eb775b418f Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 10:59:48 -0700 Subject: [PATCH 04/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. (make git-commit-autogen-test) --- .../vector-crypto/api-testing/vandn.c | 358 +++++++++ .../vector-crypto/api-testing/vbrev.c | 182 +++++ .../vector-crypto/api-testing/vbrev8.c | 182 +++++ .../vector-crypto/api-testing/vclz.c | 182 +++++ .../vector-crypto/api-testing/vctz.c | 182 +++++ .../vector-crypto/api-testing/vrev8.c | 182 +++++ .../vector-crypto/api-testing/vrol.c | 358 +++++++++ .../vector-crypto/api-testing/vror.c | 358 +++++++++ .../vector-crypto/api-testing/vwsll.c | 246 ++++++ .../vector-crypto/llvm-api-tests/vandn.c | 359 +++++++++ .../vector-crypto/llvm-api-tests/vbrev.c | 183 +++++ .../vector-crypto/llvm-api-tests/vbrev8.c | 183 +++++ .../vector-crypto/llvm-api-tests/vclz.c | 183 +++++ .../vector-crypto/llvm-api-tests/vctz.c | 183 +++++ .../vector-crypto/llvm-api-tests/vrev8.c | 183 +++++ .../vector-crypto/llvm-api-tests/vrol.c | 359 +++++++++ .../vector-crypto/llvm-api-tests/vror.c | 359 +++++++++ .../vector-crypto/llvm-api-tests/vwsll.c | 247 ++++++ .../llvm-overloaded-tests/vandn.c | 359 +++++++++ .../llvm-overloaded-tests/vbrev.c | 183 +++++ .../llvm-overloaded-tests/vbrev8.c | 183 +++++ .../llvm-overloaded-tests/vclz.c | 183 +++++ .../llvm-overloaded-tests/vctz.c | 183 +++++ .../llvm-overloaded-tests/vrev8.c | 183 +++++ .../llvm-overloaded-tests/vrol.c | 359 +++++++++ .../llvm-overloaded-tests/vror.c | 359 +++++++++ .../llvm-overloaded-tests/vwsll.c | 247 ++++++ .../overloaded-api-testing/vandn.c | 358 +++++++++ .../overloaded-api-testing/vbrev.c | 182 +++++ .../overloaded-api-testing/vbrev8.c | 182 +++++ .../overloaded-api-testing/vclz.c | 182 +++++ .../overloaded-api-testing/vctz.c | 182 +++++ .../overloaded-api-testing/vrev8.c | 182 +++++ .../overloaded-api-testing/vrol.c | 358 +++++++++ .../overloaded-api-testing/vror.c | 358 +++++++++ .../overloaded-api-testing/vwsll.c | 246 ++++++ .../policy_funcs/api-testing/vandn.c | 710 +++++++++++++++++ .../policy_funcs/api-testing/vbrev.c | 358 +++++++++ .../policy_funcs/api-testing/vbrev8.c | 358 +++++++++ .../policy_funcs/api-testing/vrev8.c | 358 +++++++++ .../policy_funcs/api-testing/vrol.c | 710 +++++++++++++++++ .../policy_funcs/api-testing/vror.c | 710 +++++++++++++++++ .../policy_funcs/api-testing/vwsll.c | 486 ++++++++++++ .../policy_funcs/llvm-api-tests/vandn.c | 711 ++++++++++++++++++ .../policy_funcs/llvm-api-tests/vbrev.c | 359 +++++++++ .../policy_funcs/llvm-api-tests/vbrev8.c | 359 +++++++++ .../policy_funcs/llvm-api-tests/vrev8.c | 359 +++++++++ .../policy_funcs/llvm-api-tests/vrol.c | 711 ++++++++++++++++++ .../policy_funcs/llvm-api-tests/vror.c | 711 ++++++++++++++++++ .../policy_funcs/llvm-api-tests/vwsll.c | 487 ++++++++++++ .../llvm-overloaded-tests/vandn.c | 711 ++++++++++++++++++ .../llvm-overloaded-tests/vbrev.c | 359 +++++++++ .../llvm-overloaded-tests/vbrev8.c | 359 +++++++++ .../llvm-overloaded-tests/vrev8.c | 359 +++++++++ .../policy_funcs/llvm-overloaded-tests/vrol.c | 711 ++++++++++++++++++ .../policy_funcs/llvm-overloaded-tests/vror.c | 711 ++++++++++++++++++ .../llvm-overloaded-tests/vwsll.c | 487 ++++++++++++ 
.../overloaded-api-testing/vandn.c | 710 +++++++++++++++++ .../overloaded-api-testing/vbrev.c | 358 +++++++++ .../overloaded-api-testing/vbrev8.c | 358 +++++++++ .../overloaded-api-testing/vrev8.c | 358 +++++++++ .../overloaded-api-testing/vrol.c | 710 +++++++++++++++++ .../overloaded-api-testing/vror.c | 710 +++++++++++++++++ .../overloaded-api-testing/vwsll.c | 486 ++++++++++++ 64 files changed, 23712 insertions(+) create mode 100644 auto-generated/vector-crypto/api-testing/vandn.c create mode 100644 auto-generated/vector-crypto/api-testing/vbrev.c create mode 100644 auto-generated/vector-crypto/api-testing/vbrev8.c create mode 100644 auto-generated/vector-crypto/api-testing/vclz.c create mode 100644 auto-generated/vector-crypto/api-testing/vctz.c create mode 100644 auto-generated/vector-crypto/api-testing/vrev8.c create mode 100644 auto-generated/vector-crypto/api-testing/vrol.c create mode 100644 auto-generated/vector-crypto/api-testing/vror.c create mode 100644 auto-generated/vector-crypto/api-testing/vwsll.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vandn.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vbrev.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vbrev8.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vclz.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vctz.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vrev8.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vrol.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vror.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vwsll.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vror.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vandn.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vbrev.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vclz.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vctz.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vrev8.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vrol.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vror.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vwsll.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c create mode 100644 
auto-generated/vector-crypto/policy_funcs/api-testing/vror.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c diff --git a/auto-generated/vector-crypto/api-testing/vandn.c b/auto-generated/vector-crypto/api-testing/vandn.c new file mode 100644 index 000000000..50ca46138 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vandn.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t 
test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t 
test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_m(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_m(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_m(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_m(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_m(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_m(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_m(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_m(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, 
vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_m(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_m(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_m(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_m(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_m(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_m(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_m(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_m(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_m(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_m(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_m(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_m(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_m(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(mask, vs2, vs1, vl); +} 
+ +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vbrev.c b/auto-generated/vector-crypto/api-testing/vbrev.c new file mode 100644 index 000000000..97d4855ac --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vbrev.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, 
vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vbrev8.c b/auto-generated/vector-crypto/api-testing/vbrev8.c new file mode 100644 index 000000000..323154304 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vbrev8.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return 
__riscv_vbrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + 
return __riscv_vbrev8_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vclz.c b/auto-generated/vector-crypto/api-testing/vclz.c new file mode 100644 index 000000000..655af1c63 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclz.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return 
__riscv_vclz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(mask, vs2, 
vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vctz.c b/auto-generated/vector-crypto/api-testing/vctz.c new file mode 100644 index 000000000..262e6be9b --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vctz.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return 
__riscv_vctz_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vrev8.c b/auto-generated/vector-crypto/api-testing/vrev8.c new file mode 100644 index 000000000..9d2ea220c --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vrev8.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2(vs2, 
vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + 
return __riscv_vrev8_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vrol.c b/auto-generated/vector-crypto/api-testing/vrol.c new file mode 100644 index 000000000..41fdc7637 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vrol.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + 
return __riscv_vrol_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_m(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_m(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_m(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_m(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_m(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_m(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_m(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_m(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_m(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_m(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_m(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_m(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_m(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_m(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_m(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_m(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(mask, vs2, vs1, vl); +} 
+ +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_m(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_m(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_m(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_m(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_m(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_m(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_m(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_m(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vror.c b/auto-generated/vector-crypto/api-testing/vror.c new file mode 100644 index 000000000..c00b0b98e --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vror.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return 
__riscv_vror_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + 
return __riscv_vror_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_m(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_m(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_m(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_m(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_m(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_m(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_m(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_m(mask, vs2, rs1, vl); +} + 
+vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_m(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_m(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_m(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_m(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_m(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_m(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_m(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_m(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_m(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_m(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_m(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_m(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_m(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_m(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_m(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_m(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_m(mask, vs2, vs1, vl); +} + 
+vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vwsll.c b/auto-generated/vector-crypto/api-testing/vwsll.c new file mode 100644 index 000000000..a36e5a3c6 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vwsll.c @@ -0,0 +1,246 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t 
test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_m(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_m(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_m(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_m(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_m(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_m(mask, vs2, vs1, vl); +} + +vuint16m8_t 
test_vwsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_m(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_m(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_m(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_m(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_m(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_m(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_m(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/llvm-api-tests/vandn.c new file mode 100644 index 000000000..ac15e471b --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vandn.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8(vs2, 
vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t 
test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_m(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_m(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_m(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { 
+ return __riscv_vandn_vx_u8m2_m(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_m(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_m(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_m(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_m(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_m(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_m(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_m(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_m(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_m(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_m(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_m(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_m(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_m(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, 
vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_m(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_m(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_m(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_m(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c new file mode 100644 index 000000000..26c4de404 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { 
+ return __riscv_vbrev_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t 
test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c new file mode 100644 index 000000000..d22110c4f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(mask, 
vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/llvm-api-tests/vclz.c new file mode 100644 index 000000000..9ce26f56f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclz.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4(vs2, vl); +} 
+ +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(mask, vs2, vl); +} 
+ +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/llvm-api-tests/vctz.c new file mode 100644 index 000000000..504efd27a --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vctz.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1(vs2, vl); +} + +vuint32m2_t 
test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, 
size_t vl) { + return __riscv_vctz_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c new file mode 100644 index 000000000..f5d49ee05 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_m(mask, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_m(mask, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_m(mask, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_m(mask, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u8m2_m(mask, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_m(mask, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(mask, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(mask, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(mask, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(mask, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(mask, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(mask, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(mask, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(mask, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(mask, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(mask, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(mask, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_m(mask, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(mask, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(mask, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(mask, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/llvm-api-tests/vrol.c new file mode 100644 index 000000000..1154de852 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vrol.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vrol_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_m(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_m(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_m(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_m(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_m(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_m(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_m(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_m(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return 
__riscv_vrol_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_m(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_m(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_m(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_m(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_m(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_m(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_m(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_m(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_m(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_m(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_m(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_m(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_m(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_m(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_m(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_m(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vror.c b/auto-generated/vector-crypto/llvm-api-tests/vror.c new file mode 100644 index 000000000..694b6e0e0 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vror.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + 
return __riscv_vror_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, 
vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_m(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_m(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_m(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_m(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_m(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_m(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_m(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_m(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_m(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_m(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_m(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_m(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_m(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror_vx_u16m4_m(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_m(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_m(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_m(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_m(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_m(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_m(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_m(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_m(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_m(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_m(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c new file mode 100644 index 000000000..ca3fdaa23 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c @@ -0,0 +1,247 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// 
RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return 
__riscv_vwsll_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_m(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_m(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_m(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_m(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_m(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_m(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_m(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_m(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_m(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_m(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_m(mask, vs2, rs1, vl); +} + 
+vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_m(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_m(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c new file mode 100644 index 000000000..e2894d7e4 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t 
vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) 
{ + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { 
+ return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c new file mode 100644 index 000000000..0c95750c7 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// 
RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, 
size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c new file mode 100644 index 000000000..d94465fe5 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, 
vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c new file mode 100644 index 
000000000..f1da0ff12 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + 
return __riscv_vclz(mask, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c new file mode 100644 index 000000000..2dc00bb3f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, 
size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c new file mode 
100644 index 000000000..72738a4c7 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c @@ -0,0 +1,183 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16mf2_t 
test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c new file mode 100644 index 000000000..51fab3b0c --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m4_t 
test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + 
+vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t 
vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c new file mode 100644 index 000000000..f5439c7ab --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + 
+vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + 
+vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t 
rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, 
vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c new file mode 100644 index 000000000..f739b1cd3 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c @@ -0,0 +1,247 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, 
vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(mask, 
vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vandn.c b/auto-generated/vector-crypto/overloaded-api-testing/vandn.c new file mode 100644 index 000000000..e744cd9fe --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vandn.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m2_t 
test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t 
vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16m1_t 
test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t 
rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c b/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c new file mode 100644 index 000000000..8c82c5496 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, 
vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c b/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c new file mode 100644 index 000000000..5785a810f --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32mf2_t 
test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +vuint64m8_t 
test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclz.c b/auto-generated/vector-crypto/overloaded-api-testing/vclz.c new file mode 100644 index 000000000..8bea51126 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclz.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, 
size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vctz.c b/auto-generated/vector-crypto/overloaded-api-testing/vctz.c new file mode 100644 index 000000000..86090d8aa --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vctz.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m1_t 
test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c 
b/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c new file mode 100644 index 000000000..d013b9218 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c @@ -0,0 +1,182 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t 
vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vrol.c b/auto-generated/vector-crypto/overloaded-api-testing/vrol.c new file mode 100644 index 000000000..dda6195ca --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vrol.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return 
__riscv_vrol(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, 
size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, 
vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vror.c b/auto-generated/vector-crypto/overloaded-api-testing/vror.c new file mode 100644 index 000000000..600fc1d66 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vror.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) 
{ + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + 
return __riscv_vror(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t 
vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c new file mode 100644 index 000000000..c0e0521ff --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c @@ -0,0 +1,246 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m1_t 
test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, 
vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c new file mode 100644 index 000000000..6cdb97418 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c @@ -0,0 +1,710 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, 
vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t 
maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t 
maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(mask, maskedoff, vs2, 
rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, 
vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + 
return __riscv_vandn_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t 
test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c new file mode 100644 index 000000000..f4a0371a9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c @@ -0,0 +1,358 @@ +#include 
+#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t 
test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t 
test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + 
+vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c new file mode 100644 index 000000000..d9a0c3cc2 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return 
__riscv_vbrev8_v_u32m8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return 
__riscv_vbrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + 
+vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) 
{ + return __riscv_vbrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c new file mode 100644 index 000000000..e5a425b5f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c @@ -0,0 +1,358 @@ +#include <stdint.h> +#include <riscv_vector.h> + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return
__riscv_vrev8_v_u16m4_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t 
mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} 
+ +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c new file mode 100644 index 000000000..a023644e3 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c @@ -0,0 +1,710 @@ +#include <stdint.h> +#include <riscv_vector.h> + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return
__riscv_vrol_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); 
+} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vrol_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tum(mask, 
maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, 
vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t 
test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c new file mode 100644 index 000000000..c94ef3774 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c @@ -0,0 +1,710 @@ +#include <stdint.h> +#include <riscv_vector.h> + +typedef _Float16 float16_t; +typedef float
float32_t; +typedef double float64_t; +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t 
test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t 
test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + 
+vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t 
rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t 
test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, 
vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(mask, 
maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, 
vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c new file mode 100644 index 000000000..a99acee03 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c @@ -0,0 +1,486 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, 
vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + 
return __riscv_vwsll_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + 
+vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t 
mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t 
maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vwsll_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c new file mode 100644 index 000000000..cdd2befb5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c @@ -0,0 +1,711 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(maskedoff, vs2, 
rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(maskedoff, vs2, 
vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, 
uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(mask, maskedoff, vs2, rs1, 
vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t 
test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, 
vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, 
vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return 
__riscv_vandn_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c
new file mode 100644
index 000000000..fd694cc5a
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c
@@ -0,0 +1,359 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf8_tu(maskedoff, vs2, vl);
+}
+
+vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf4_tu(maskedoff, vs2, vl);
+}
+
+vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m1_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m2_tu(maskedoff, vs2, vl);
+}
+
+vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m4_tu(maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m8_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16mf4_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m1_tu(maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m2_tu(maskedoff, vs2, vl);
+}
+
+vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m4_tu(maskedoff, vs2, vl);
+}
+
+vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m8_tu(maskedoff, vs2, vl);
+}
+
+vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m1_tu(maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m2_tu(maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m4_tu(maskedoff, vs2, vl);
+}
+
+vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff,
vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t 
test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(mask, maskedoff, 
vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m4_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m4_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m8_mu(mask, maskedoff, vs2, vl);
+}
+
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c
new file mode 100644
index 000000000..1f0433554
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c
@@ -0,0 +1,359 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf8_tu(maskedoff, vs2, vl);
+}
+
+vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf4_tu(maskedoff, vs2, vl);
+}
+
+vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m1_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m2_tu(maskedoff, vs2, vl);
+}
+
+vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m4_tu(maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m8_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf4_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m1_tu(maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m2_tu(maskedoff, vs2, vl);
+}
+
+vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + 
+vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t 
maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_mu(mask, 
maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m4_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m4_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m4_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m8_mu(mask, maskedoff, vs2, vl);
+}
+
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c
new file mode 100644
index 000000000..69737f009
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c
@@ -0,0 +1,359 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf8_tu(maskedoff, vs2, vl);
+}
+
+vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf4_tu(maskedoff, vs2, vl);
+}
+
+vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m1_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t 
mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tumu(mask, maskedoff, 
vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, 
vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c new file mode 100644 index 000000000..088f1363a --- /dev/null +++ 
b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c
@@ -0,0 +1,711 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf8_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8mf8_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8mf4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8m4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m8_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8m8_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u16mf4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16mf4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u16mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u16m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t
vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t 
vl) { + return __riscv_vrol_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t 
test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + 
return __riscv_vrol_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + 
+vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, 
vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, 
vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + 
+vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c
new file mode 100644
index 000000000..b7ea078c6
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c
@@ -0,0 +1,711 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8mf8_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8mf8_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8mf4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8mf4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8m4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
return __riscv_vror_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vror_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t 
test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { 
+ return __riscv_vror_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t 
test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, 
vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t 
maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(mask, maskedoff, vs2, vs1, 
vl);
+}
+
+vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vror_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vror_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vror_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c
new file mode 100644
index 000000000..21b1bc7e8
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c
@@ -0,0 +1,487 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16mf4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16mf4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff,
vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t 
maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + 
+vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vwsll_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t 
test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c
new file mode 100644
index 000000000..298fa71e9
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c
@@ -0,0 +1,711 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2,
vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, 
vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, 
vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, 
size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return 
__riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return 
__riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t 
mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, 
size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c new file mode 100644 index 000000000..c0a7edfac --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t 
maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) 
{ + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t 
test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + 
+vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c new file mode 100644 index 000000000..fda1416d8 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t 
vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, 
maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t 
test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return 
__riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c new file mode 100644 index 000000000..264f15a6b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c @@ -0,0 +1,359 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32m1_t 
test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} 
+ +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, 
size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t 
maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c new file mode 100644 index 000000000..a34a5be23 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c @@ -0,0 +1,711 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t 
maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) 
{ + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, 
size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + 
+vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, 
vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, 
maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t 
test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, 
vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, 
size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c
new file mode 100644
index 000000000..5a7dad772
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c
@@ -0,0 +1,711 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf4_t
test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t 
maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} 
+ +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, 
maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t 
test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t 
mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, 
rs1, vl);
+}
+
+vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c
new file mode 100644
index 000000000..6f9409182
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c
@@ -0,0 +1,487 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m2_t
test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t 
test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint32m2_t 
maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t 
maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t 
vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); 
+} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c new file mode 100644 index 000000000..1ecbcdfa9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c @@ -0,0 +1,710 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return 
__riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, 
size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t 
test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t 
test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t 
test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t 
test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + 
+vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, 
vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, 
maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c new file mode 100644 index 000000000..c0c9eb726 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32m1_t 
test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} 
+ +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, 
size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t 
maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c new file mode 100644 index 000000000..c375826e5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t 
maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + 
+vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, 
vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, 
maskedoff, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c new file mode 100644 index 000000000..55f6bf42e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c @@ -0,0 +1,358 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + 
return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, 
maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t 
maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint16m8_t 
test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c new file mode 100644 index 000000000..8b7154ede --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c @@ -0,0 +1,710 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + 
+vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t 
test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t 
vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + 
+vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, 
vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + 
return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t 
vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t 
test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c new file mode 100644 index 000000000..b2856896f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c @@ -0,0 +1,710 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint8m8_t 
test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t 
maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t 
test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, 
vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t 
maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { 
+ return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t 
test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c new file mode 100644 index 000000000..76d9f8828 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c @@ -0,0 +1,486 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m2_t 
test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t 
test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint32m2_t 
maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t 
maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t 
vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); 
+} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +} + From 45f0ebcf06232c26c6fb8ff41f8d50b2b7438f82 Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 03:58:50 -0700 Subject: [PATCH 05/44] [vector-crypto] Define intrinsics for the Zvbc extension Signed-off-by: eop Chen --- .../templates/vector_crypto_template.py | 11 +++++++++-- .../rvv_intrinsic_gen/vector_crypto_inst.py | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index 9574bbbb8..ffcc66908 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -22,16 +22,23 @@ operand_mnemonic_dict["vrol"] = ["vv", "vx"] operand_mnemonic_dict["vror"] = ["vv", "vx"] # saving the `vi` variant operand_mnemonic_dict["vwsll"] = ["vv", "vx"] # saving the `vi` variant +# Zvbc: Vector Carryless Multiplication +operand_mnemonic_dict["vclmul"] = ["vv", "vx"] +operand_mnemonic_dict["vclmulh"] = ["vv", "vx"] def has_vs1_input(name): - has_vs1_input_inst_set = {"vandn", "vrol", "vror", "vwsll"} + has_vs1_input_inst_set = { + "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh" + } return name in has_vs1_input_inst_set def has_rs1_input(name): - has_rs1_input_inst_set = {"vandn", "vrol", "vror", "vwsll"} + has_rs1_input_inst_set = { + "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh" + } return name in has_rs1_input_inst_set diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index 5cfe60263..e5b2625b9 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -63,3 +63,17 @@ def gen(g): decorators.has_masking_maskedoff_policy) #################################################################### + + g.start_group("Zvbc - Vector Carryless Multiplication") + + g.function_group( + vector_crypto_template, + "Vector Carryless Multiplication", + "", # FIXME: We probably have a separate document for vector-crypto + ["vclmul", "vclmulh"], + UITYPE, + [64], + LMULS, + decorators.has_masking_maskedoff_policy) + + #################################################################### From 06f2208244015d65f76c08e9d87f0186d9d554e8 Mon Sep 17 00:00:00 
2001 From: eopXD Date: Mon, 17 Jul 2023 10:59:54 -0700 Subject: [PATCH 06/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. (make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 41 ++++++++++ ..._zvbc_-_vector_carryless_multiplication.md | 41 ++++++++++ .../overloaded_intrinsic_funcs.md | 41 ++++++++++ ..._zvbc_-_vector_carryless_multiplication.md | 41 ++++++++++ .../policy_funcs/intrinsic_funcs.md | 75 +++++++++++++++++++ ..._zvbc_-_vector_carryless_multiplication.md | 75 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 75 +++++++++++++++++++ ..._zvbc_-_vector_carryless_multiplication.md | 75 +++++++++++++++++++ 8 files changed, 464 insertions(+) create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 80f99bfc5..72aff8e5e 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -579,3 +579,44 @@ vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); ``` + +## Zvbc - Vector Carryless Multiplication: + +### [Vector Carryless Multiplication](): + +**Prototypes:** +``` C +vuint64m1_t __riscv_vclmul_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_m 
(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md new file mode 100644 index 000000000..4d41e53cc --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md @@ -0,0 +1,41 @@ + +## Zvbc - Vector Carryless Multiplication: + +### [Vector Carryless Multiplication](): + +**Prototypes:** +``` C +vuint64m1_t __riscv_vclmul_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_m 
(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index d4d9ea35a..ecbd3ea5c 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -579,3 +579,44 @@ vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t v vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); ``` + +## Zvbc - Vector Carryless Multiplication: + +### [Vector Carryless Multiplication](): + +**Prototypes:** +``` C +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul 
(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md new file mode 100644 index 000000000..df952e521 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md @@ -0,0 +1,41 @@ + +## Zvbc - Vector Carryless Multiplication: + +### [Vector Carryless Multiplication](): + +**Prototypes:** +``` C +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t mask, vuint64m1_t 
vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index f5ef93699..a402d3329 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -951,3 +951,78 @@ vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vu vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); ``` + +## Zvbc - Vector Carryless Multiplication: + +### [Vector Carryless Multiplication](): + +**Prototypes:** +``` C +vuint64m1_t __riscv_vclmul_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tum (vbool32_t mask, 
vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu (vbool8_t 
mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md new file mode 100644 index 000000000..7e7effc48 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md @@ -0,0 +1,75 @@ + +## Zvbc - Vector Carryless Multiplication: + +### [Vector Carryless Multiplication](): + +**Prototypes:** +``` C +vuint64m1_t __riscv_vclmul_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tu (vuint64m1_t maskedoff, 
vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tumu (vbool16_t 
mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index c94663c42..e9d67b88e 
100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -951,3 +951,78 @@ vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); ``` + +## Zvbc - Vector Carryless Multiplication: + +### [Vector Carryless Multiplication](): + +**Prototypes:** +``` C +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum 
(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu 
(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md new file mode 100644 index 000000000..6d12267b2 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md @@ -0,0 +1,75 @@ + +## Zvbc - Vector Carryless Multiplication: + +### [Vector Carryless Multiplication](): + +**Prototypes:** +``` C +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); 
+vuint64m4_t __riscv_vclmul_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t 
vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +``` From 6e27c75da7ba5d5b966d9c8c393a38eb17a5ed07 Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 10:59:55 -0700 Subject: [PATCH 07/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. (make git-commit-autogen-test) --- .../vector-crypto/api-testing/vclmul.c | 70 +++++++++ .../vector-crypto/api-testing/vclmulh.c | 70 +++++++++ .../vector-crypto/llvm-api-tests/vclmul.c | 71 +++++++++ .../vector-crypto/llvm-api-tests/vclmulh.c | 71 +++++++++ .../llvm-overloaded-tests/vclmul.c | 71 +++++++++ .../llvm-overloaded-tests/vclmulh.c | 71 +++++++++ .../overloaded-api-testing/vclmul.c | 70 +++++++++ .../overloaded-api-testing/vclmulh.c | 70 +++++++++ .../policy_funcs/api-testing/vclmul.c | 134 +++++++++++++++++ .../policy_funcs/api-testing/vclmulh.c | 134 +++++++++++++++++ .../policy_funcs/llvm-api-tests/vclmul.c | 135 ++++++++++++++++++ .../policy_funcs/llvm-api-tests/vclmulh.c | 135 ++++++++++++++++++ .../llvm-overloaded-tests/vclmul.c | 135 ++++++++++++++++++ .../llvm-overloaded-tests/vclmulh.c | 135 ++++++++++++++++++ .../overloaded-api-testing/vclmul.c | 134 +++++++++++++++++ .../overloaded-api-testing/vclmulh.c | 134 +++++++++++++++++ 16 files changed, 1640 insertions(+) create mode 100644 auto-generated/vector-crypto/api-testing/vclmul.c create mode 100644 auto-generated/vector-crypto/api-testing/vclmulh.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vclmul.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vclmulh.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vclmul.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c create mode 100644 
auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c diff --git a/auto-generated/vector-crypto/api-testing/vclmul.c b/auto-generated/vector-crypto/api-testing/vclmul.c new file mode 100644 index 000000000..615da37c2 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclmul.c @@ -0,0 +1,70 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vclmulh.c b/auto-generated/vector-crypto/api-testing/vclmulh.c new file mode 100644 index 000000000..37795dc1a --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclmulh.c @@ -0,0 +1,70 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double 
float64_t; +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c new file mode 100644 index 000000000..a56321bd7 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c @@ -0,0 +1,71 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t 
test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c new file mode 100644 index 000000000..0772acf6d --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c @@ -0,0 +1,71 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_m(mask, vs2, rs1, vl); +} + +vuint64m2_t 
test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_m(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c new file mode 100644 index 000000000..36cdfb21e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c @@ -0,0 +1,71 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, 
uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c new file mode 100644 index 000000000..f5343fa97 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c @@ -0,0 +1,71 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c b/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c new file mode 100644 index 000000000..f751b2175 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c @@ -0,0 +1,70 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + 
+vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c b/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c new file mode 100644 index 000000000..c7a9d9d6d --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c @@ -0,0 +1,70 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t 
vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c new file mode 100644 index 000000000..bc3add0ee --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c @@ -0,0 +1,134 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return 
__riscv_vclmul_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return 
__riscv_vclmul_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c new file mode 100644 index 000000000..7ca88e340 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c @@ -0,0 +1,134 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t 
vl) { + return __riscv_vclmulh_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c new file mode 100644 index 000000000..be7944419 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c @@ -0,0 +1,135 @@ 
+// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(mask, 
maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c new file mode 100644 index 000000000..053782475 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c @@ -0,0 +1,135 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, 
vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + 
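The policy suffixes exercised in this file differ only in how tail and masked-off elements are handled; a minimal sketch (not part of the generated tests, using only the non-overloaded policy intrinsics declared above) of the tail-undisturbed form:

// Illustrative helper: with the _tu (tail-undisturbed) variant, elements at
// index >= vl keep their values from `dest` rather than being tail-agnostic.
static inline vuint64m1_t clmulh_keep_tail(vuint64m1_t dest, vuint64m1_t a,
                                           vuint64m1_t b, size_t vl) {
  return __riscv_vclmulh_vv_u64m1_tu(dest, a, b, vl);
}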
+vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c new file mode 100644 index 000000000..15bc5f9df --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c @@ -0,0 +1,135 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t 
test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c new file mode 100644 index 000000000..cdcb58c88 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c @@ -0,0 +1,135 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t 
test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return 
__riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c new file mode 100644 index 000000000..3fe950acd --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c @@ -0,0 +1,134 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, 
vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t 
maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c new file mode 100644 index 000000000..cb04c9935 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c @@ -0,0 +1,134 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, 
maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + From 211b849c090f9f155d4a3feb7ec8164632ecc9ba Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 04:15:19 -0700 Subject: [PATCH 08/44] [vector-crypto] Define intrinsics for the Zvkg extension Signed-off-by: eop Chen --- .../templates/vector_crypto_template.py | 22 +++++++++++++++---- .../rvv_intrinsic_gen/vector_crypto_inst.py | 14 ++++++++++++ 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index ffcc66908..638964d3d 100644 --- 
a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -25,11 +25,20 @@ # Zvbc: Vector Carryless Multiplication operand_mnemonic_dict["vclmul"] = ["vv", "vx"] operand_mnemonic_dict["vclmulh"] = ["vv", "vx"] +# Zvkg: Vector GCM/GMAC +operand_mnemonic_dict["vghsh"] = ["vv"] +operand_mnemonic_dict["vgmul"] = ["vv"] + + +def has_vd_input(name): + has_vd_input_inst_set = {"vghsh", "vgmul"} + + return name in has_vd_input_inst_set def has_vs1_input(name): has_vs1_input_inst_set = { - "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh" + "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh", "vghsh" } return name in has_vs1_input_inst_set @@ -81,10 +90,15 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): else: kwargs["return_type"] = type_helper.v kwargs = {**kwargs, **decorator.mask_args(type_helper.m, type_helper.v)} - if op == "vwsll": - kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.wv)} + # If vd is already in the input parameter, we don't need to emit another + # parameter when tail policy is TU. + if has_vd_input(op): + kwargs["vd"] = type_helper.v else: - kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.v)} + if op == "vwsll": + kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.wv)} + else: + kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.v)} kwargs["vs2"] = type_helper.v diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index e5b2625b9..651e54b3e 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -77,3 +77,17 @@ def gen(g): decorators.has_masking_maskedoff_policy) #################################################################### + + g.start_group("Zvkg - Vector GCM/GMAC") + + g.function_group( + vector_crypto_template, + "Vector GCM/GMAC", + "", # FIXME: We probably have a separate document for vector-crypto + ["vghsh", "vgmul"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + #################################################################### From 1a4be9ac09254796ee540fb6e6634dfc800ff41b Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 10:59:59 -0700 Subject: [PATCH 09/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. 
(make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 18 ++++++++++++++++++ .../02_zvkg_-_vector_gcm_gmac.md | 18 ++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 18 ++++++++++++++++++ .../02_zvkg_-_vector_gcm_gmac.md | 18 ++++++++++++++++++ .../policy_funcs/intrinsic_funcs.md | 18 ++++++++++++++++++ .../02_zvkg_-_vector_gcm_gmac.md | 18 ++++++++++++++++++ .../policy_funcs/overloaded_intrinsic_funcs.md | 18 ++++++++++++++++++ .../02_zvkg_-_vector_gcm_gmac.md | 18 ++++++++++++++++++ 8 files changed, 144 insertions(+) create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 72aff8e5e..1bc5b01bf 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -620,3 +620,21 @@ vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_ vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); ``` + +## Zvkg - Vector GCM/GMAC: + +### [Vector GCM/GMAC](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vghsh_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md b/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md new file mode 100644 index 000000000..5e3e8fcf8 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md @@ -0,0 +1,18 @@ + +## Zvkg - Vector GCM/GMAC: + +### [Vector GCM/GMAC](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vghsh_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t 
__riscv_vgmul_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index ecbd3ea5c..769cd3dd4 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -620,3 +620,21 @@ vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); ``` + +## Zvkg - Vector GCM/GMAC: + +### [Vector GCM/GMAC](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vghsh (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md new file mode 100644 index 000000000..0b3bf1254 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md @@ -0,0 +1,18 @@ + +## Zvkg - Vector GCM/GMAC: + +### [Vector GCM/GMAC](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vghsh (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index a402d3329..284440cf5 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -1026,3 +1026,21 @@ vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t mask, vuint64m8_t 
maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); ``` + +## Zvkg - Vector GCM/GMAC: + +### [Vector GCM/GMAC](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vghsh_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md new file mode 100644 index 000000000..0cd0c65e3 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md @@ -0,0 +1,18 @@ + +## Zvkg - Vector GCM/GMAC: + +### [Vector GCM/GMAC](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vghsh_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index e9d67b88e..05ec13669 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -1026,3 +1026,21 @@ vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4 vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); ``` + +## Zvkg - Vector GCM/GMAC: + +### [Vector GCM/GMAC](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vghsh_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t 
vs1, size_t vl); +vuint32m2_t __riscv_vghsh_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md new file mode 100644 index 000000000..0f44b8ea2 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md @@ -0,0 +1,18 @@ + +## Zvkg - Vector GCM/GMAC: + +### [Vector GCM/GMAC](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vghsh_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` From 58a22a205d44a1ecbe4cfd6d39362b15510e7e09 Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 11:00:00 -0700 Subject: [PATCH 10/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. 
(make git-commit-autogen-test) --- .../vector-crypto/api-testing/vghsh.c | 26 ++++++++++++++++++ .../vector-crypto/api-testing/vgmul.c | 26 ++++++++++++++++++ .../vector-crypto/llvm-api-tests/vghsh.c | 27 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vgmul.c | 27 +++++++++++++++++++ .../llvm-overloaded-tests/vghsh.c | 27 +++++++++++++++++++ .../llvm-overloaded-tests/vgmul.c | 27 +++++++++++++++++++ .../overloaded-api-testing/vghsh.c | 26 ++++++++++++++++++ .../overloaded-api-testing/vgmul.c | 26 ++++++++++++++++++ .../policy_funcs/api-testing/vghsh.c | 26 ++++++++++++++++++ .../policy_funcs/api-testing/vgmul.c | 26 ++++++++++++++++++ .../policy_funcs/llvm-api-tests/vghsh.c | 27 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vgmul.c | 27 +++++++++++++++++++ .../llvm-overloaded-tests/vghsh.c | 27 +++++++++++++++++++ .../llvm-overloaded-tests/vgmul.c | 27 +++++++++++++++++++ .../overloaded-api-testing/vghsh.c | 26 ++++++++++++++++++ .../overloaded-api-testing/vgmul.c | 26 ++++++++++++++++++ 16 files changed, 424 insertions(+) create mode 100644 auto-generated/vector-crypto/api-testing/vghsh.c create mode 100644 auto-generated/vector-crypto/api-testing/vgmul.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vghsh.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vgmul.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vghsh.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vgmul.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c diff --git a/auto-generated/vector-crypto/api-testing/vghsh.c b/auto-generated/vector-crypto/api-testing/vghsh.c new file mode 100644 index 000000000..b93ebfa2f --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vghsh.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vgmul.c 
b/auto-generated/vector-crypto/api-testing/vgmul.c new file mode 100644 index 000000000..09521d4d0 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vgmul.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c new file mode 100644 index 000000000..71dcf52e5 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c new file mode 100644 index 000000000..a39f3c8c0 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c new file mode 100644 index 000000000..5940884a9 --- /dev/null +++ 
b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c new file mode 100644 index 000000000..4d254ff6c --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c b/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c new file mode 100644 index 000000000..8a4eb46a5 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c b/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c new file mode 100644 index 000000000..48c480933 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c @@ -0,0 +1,26 @@ +#include 
+#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c new file mode 100644 index 000000000..48ec0cb4b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c new file mode 100644 index 000000000..13b28496d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c new file mode 100644 index 000000000..e8271d882 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, 
vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c new file mode 100644 index 000000000..9f725f34a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c new file mode 100644 index 000000000..4c246ad78 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c new file mode 100644 index 000000000..5bad9f0f6 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c 
@@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c new file mode 100644 index 000000000..eeb1718a4 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c new file mode 100644 index 000000000..a50b7e4a9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + From b0896b34e8a9c07d0ef1f4e94be1c7c16808cdc1 Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 04:45:56 -0700 Subject: [PATCH 11/44] [vector-crypto] Define intrinsics for the Zvkned extension Signed-off-by: eop Chen --- .../rvv_intrinsic_gen/generator.py | 3 ++ .../templates/vector_crypto_template.py | 17 ++++++- .../rvv_intrinsic_gen/vector_crypto_inst.py | 44 +++++++++++++++++++ 3 files changed, 63 insertions(+), 
1 deletion(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py index 1f4f9ada9..97a92f6b9 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py @@ -580,6 +580,9 @@ def output_call_arg(arg_name, type_name): if arg_name == "frm": return "__RISCV_FRM_RNE" + if arg_name == "uimm": + return "0" + return arg_name # Write test func body. diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index 638964d3d..c7201db9c 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -28,10 +28,20 @@ # Zvkg: Vector GCM/GMAC operand_mnemonic_dict["vghsh"] = ["vv"] operand_mnemonic_dict["vgmul"] = ["vv"] +# Zvkned: NIST Suite: Vector AES Block Cipher +operand_mnemonic_dict["vaesef"] = ["vv", "vs"] +operand_mnemonic_dict["vaesem"] = ["vv", "vs"] +operand_mnemonic_dict["vaesdf"] = ["vv", "vs"] +operand_mnemonic_dict["vaesdm"] = ["vv", "vs"] +operand_mnemonic_dict["vaeskf1"] = ["vi"] +operand_mnemonic_dict["vaeskf2"] = ["vi"] +operand_mnemonic_dict["vaesz"] = ["vs"] def has_vd_input(name): - has_vd_input_inst_set = {"vghsh", "vgmul"} + has_vd_input_inst_set = { + "vghsh", "vgmul", "vaesef", "vaesem", "vaesdf", "vaesdm", "vaesz" + } return name in has_vd_input_inst_set @@ -76,6 +86,9 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): else: inst_info = InstInfo.get(args, decorator, InstType.VX, ExtraAttr.NO_ATTR) + elif operand_mnemonic == "vi": + inst_info = InstInfo.get(args, decorator, InstType.VI, + ExtraAttr.NO_ATTR) elif operand_mnemonic == "v": inst_info = InstInfo.get(args, decorator, InstType.V, ExtraAttr.NO_ATTR) @@ -109,6 +122,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): kwargs["rs1"] = type_helper.size_t else: kwargs["rs1"] = type_helper.s + if "vi" in operand_mnemonic_dict[op]: + kwargs["uimm"] = type_helper.size_t kwargs["vl"] = type_helper.size_t diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index 651e54b3e..f25cc6562 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -91,3 +91,47 @@ def gen(g): decorators.has_no_masking_policy) #################################################################### + + g.start_group("Zvkned - NIST Suite: Vector AES Block Cipher") + + g.function_group( + vector_crypto_template, + "Vector AES Encryption", + "", # FIXME: We probably have a separate document for vector-crypto + ["vaesef", "vaesem"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector AES Decryption", + "", # FIXME: We probably have a separate document for vector-crypto + ["vaesdf", "vaesdm"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector AES-128 Forward KeySchedule generation", + "", # FIXME: We probably have a separate document for vector-crypto + ["vaeskf1", "vaeskf2"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector AES round zero", + "", # FIXME: We probably 
have a separate document for vector-crypto + ["vaesz"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + #################################################################### From 94e2085005a7f73c598bfb1ace4d990fec3884b6 Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 11:00:03 -0700 Subject: [PATCH 12/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. (make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 81 +++++++++++++++++++ ...d_-_nist_suite:_vector_aes_block_cipher.md | 81 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 81 +++++++++++++++++++ ...d_-_nist_suite:_vector_aes_block_cipher.md | 81 +++++++++++++++++++ .../policy_funcs/intrinsic_funcs.md | 81 +++++++++++++++++++ ...d_-_nist_suite:_vector_aes_block_cipher.md | 81 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 81 +++++++++++++++++++ ...d_-_nist_suite:_vector_aes_block_cipher.md | 81 +++++++++++++++++++ 8 files changed, 648 insertions(+) create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 1bc5b01bf..f5dd142d2 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -638,3 +638,84 @@ vuint32m2_t __riscv_vgmul_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvkned - NIST Suite: Vector AES Block Cipher: + +### [Vector AES Encryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, 
size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES Decryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES-128 Forward KeySchedule generation](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector AES round zero](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesz_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md 
b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md new file mode 100644 index 000000000..c059b3516 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -0,0 +1,81 @@ + +## Zvkned - NIST Suite: Vector AES Block Cipher: + +### [Vector AES Encryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES Decryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t 
vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES-128 Forward KeySchedule generation](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector AES round zero](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesz_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 769cd3dd4..d4ed46f48 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -638,3 +638,84 @@ vuint32m2_t __riscv_vgmul (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvkned - NIST Suite: Vector AES Block Cipher: + +### [Vector AES Encryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, 
size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES Decryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES-128 Forward KeySchedule generation](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaeskf1 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector AES round zero](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md new file mode 100644 index 000000000..3ff935a8c --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -0,0 +1,81 @@ + +## Zvkned - NIST Suite: Vector AES Block Cipher: + +### [Vector AES Encryption](): + 
+**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES Decryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES-128 Forward KeySchedule generation](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaeskf1 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2 
(vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector AES round zero](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 284440cf5..673d81711 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -1044,3 +1044,84 @@ vuint32m2_t __riscv_vgmul_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t v vuint32m4_t __riscv_vgmul_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvkned - NIST Suite: Vector AES Block Cipher: + +### [Vector AES Encryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES Decryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t 
__riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES-128 Forward KeySchedule generation](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector AES round zero](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesz_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md new file mode 100644 index 000000000..815e0f4ea --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -0,0 +1,81 @@ + 
+## Zvkned - NIST Suite: Vector AES Block Cipher: + +### [Vector AES Encryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES Decryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, 
size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES-128 Forward KeySchedule generation](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector AES round zero](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesz_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index 05ec13669..3d0d018ff 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -1044,3 +1044,84 @@ vuint32m2_t __riscv_vgmul_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvkned - NIST Suite: Vector AES Block Cipher: + +### [Vector AES Encryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, 
size_t vl); +vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES Decryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES-128 Forward KeySchedule generation](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaeskf1_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector AES round zero](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t 
__riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md new file mode 100644 index 000000000..d91cc9aee --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -0,0 +1,81 @@ + +## Zvkned - NIST Suite: Vector AES Block Cipher: + +### [Vector AES Encryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES Decryption](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, 
vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` + +### [Vector AES-128 Forward KeySchedule generation](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaeskf1_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector AES round zero](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` From 4d9e7cc2bfcb2f4b68fa111216396f6b01cad63f Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 11:00:04 -0700 Subject: [PATCH 13/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. 
(make git-commit-autogen-test) --- .../vector-crypto/api-testing/vaesdf.c | 46 ++++++++++++++++++ .../vector-crypto/api-testing/vaesdm.c | 46 ++++++++++++++++++ .../vector-crypto/api-testing/vaesef.c | 46 ++++++++++++++++++ .../vector-crypto/api-testing/vaesem.c | 46 ++++++++++++++++++ .../vector-crypto/api-testing/vaeskf1.c | 26 ++++++++++ .../vector-crypto/api-testing/vaeskf2.c | 26 ++++++++++ .../vector-crypto/api-testing/vaesz.c | 26 ++++++++++ .../vector-crypto/llvm-api-tests/vaesdf.c | 47 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaesdm.c | 47 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaesef.c | 47 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaesem.c | 47 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaeskf1.c | 27 +++++++++++ .../vector-crypto/llvm-api-tests/vaeskf2.c | 27 +++++++++++ .../vector-crypto/llvm-api-tests/vaesz.c | 27 +++++++++++ .../llvm-overloaded-tests/vaesdf.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vaesdm.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vaesef.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vaesem.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vaeskf1.c | 27 +++++++++++ .../llvm-overloaded-tests/vaeskf2.c | 27 +++++++++++ .../llvm-overloaded-tests/vaesz.c | 27 +++++++++++ .../overloaded-api-testing/vaesdf.c | 46 ++++++++++++++++++ .../overloaded-api-testing/vaesdm.c | 46 ++++++++++++++++++ .../overloaded-api-testing/vaesef.c | 46 ++++++++++++++++++ .../overloaded-api-testing/vaesem.c | 46 ++++++++++++++++++ .../overloaded-api-testing/vaeskf1.c | 26 ++++++++++ .../overloaded-api-testing/vaeskf2.c | 26 ++++++++++ .../overloaded-api-testing/vaesz.c | 26 ++++++++++ .../policy_funcs/api-testing/vaesdf.c | 46 ++++++++++++++++++ .../policy_funcs/api-testing/vaesdm.c | 46 ++++++++++++++++++ .../policy_funcs/api-testing/vaesef.c | 46 ++++++++++++++++++ .../policy_funcs/api-testing/vaesem.c | 46 ++++++++++++++++++ .../policy_funcs/api-testing/vaeskf1.c | 26 ++++++++++ .../policy_funcs/api-testing/vaeskf2.c | 26 ++++++++++ .../policy_funcs/api-testing/vaesz.c | 26 ++++++++++ .../policy_funcs/llvm-api-tests/vaesdf.c | 47 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaesdm.c | 47 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaesef.c | 47 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaesem.c | 47 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaeskf1.c | 27 +++++++++++ .../policy_funcs/llvm-api-tests/vaeskf2.c | 27 +++++++++++ .../policy_funcs/llvm-api-tests/vaesz.c | 27 +++++++++++ .../llvm-overloaded-tests/vaesdf.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vaesdm.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vaesef.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vaesem.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vaeskf1.c | 27 +++++++++++ .../llvm-overloaded-tests/vaeskf2.c | 27 +++++++++++ .../llvm-overloaded-tests/vaesz.c | 27 +++++++++++ .../overloaded-api-testing/vaesdf.c | 46 ++++++++++++++++++ .../overloaded-api-testing/vaesdm.c | 46 ++++++++++++++++++ .../overloaded-api-testing/vaesef.c | 46 ++++++++++++++++++ .../overloaded-api-testing/vaesem.c | 46 ++++++++++++++++++ .../overloaded-api-testing/vaeskf1.c | 26 ++++++++++ .../overloaded-api-testing/vaeskf2.c | 26 ++++++++++ .../overloaded-api-testing/vaesz.c | 26 ++++++++++ 56 files changed, 2124 insertions(+) create mode 100644 auto-generated/vector-crypto/api-testing/vaesdf.c create mode 100644 auto-generated/vector-crypto/api-testing/vaesdm.c create mode 100644 
auto-generated/vector-crypto/api-testing/vaesef.c create mode 100644 auto-generated/vector-crypto/api-testing/vaesem.c create mode 100644 auto-generated/vector-crypto/api-testing/vaeskf1.c create mode 100644 auto-generated/vector-crypto/api-testing/vaeskf2.c create mode 100644 auto-generated/vector-crypto/api-testing/vaesz.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vaesdf.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vaesdm.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vaesef.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vaesem.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vaesz.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vaesef.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vaesem.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vaesz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c create mode 100644 
auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c diff --git a/auto-generated/vector-crypto/api-testing/vaesdf.c b/auto-generated/vector-crypto/api-testing/vaesdf.c new file mode 100644 index 000000000..17cb54972 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesdf.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vaesdm.c b/auto-generated/vector-crypto/api-testing/vaesdm.c new file mode 100644 index 000000000..057d8afd8 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesdm.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, 
size_t vl) { + return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vaesef.c b/auto-generated/vector-crypto/api-testing/vaesef.c new file mode 100644 index 000000000..3576a1511 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesef.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vaesem.c b/auto-generated/vector-crypto/api-testing/vaesem.c new file mode 100644 index 000000000..11a17faa0 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesem.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2(vd, vs2, 
vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vaeskf1.c b/auto-generated/vector-crypto/api-testing/vaeskf1.c new file mode 100644 index 000000000..3e31056e0 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaeskf1.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vaeskf2.c b/auto-generated/vector-crypto/api-testing/vaeskf2.c new file mode 100644 index 000000000..8efafda00 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaeskf2.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vaesz.c b/auto-generated/vector-crypto/api-testing/vaesz.c new file mode 100644 index 000000000..1d1e99eb2 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesz.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, 
vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c new file mode 100644 index 000000000..f2a136f0e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c new file mode 100644 index 000000000..22bc1f416 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t 
test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c new file mode 100644 index 000000000..e9a5fd0cb --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c new file mode 100644 index 000000000..707d6ef10 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, 
vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c new file mode 100644 index 000000000..bd2625c66 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c new file mode 100644 index 000000000..c83da63c1 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c new file mode 100644 index 000000000..f219721ca --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t
test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c new file mode 100644 index 000000000..23d4151c5 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c new file mode 100644 index 000000000..6769c8d21 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, 
vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c new file mode 100644 index 000000000..51a65a3aa --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c new file mode 100644 index 000000000..4c25db1cc --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return 
__riscv_vaesem(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c new file mode 100644 index 000000000..3fa9b9126 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c new file mode 100644 index 000000000..7060fdf6e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c new file mode 100644 index 000000000..7f64b61e4 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c new file mode 
100644 index 000000000..968f0f6d0 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c new file mode 100644 index 000000000..070daf1cb --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c new file mode 100644 index 000000000..33b2f940a --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t 
vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c new file mode 100644 index 000000000..33eb27e22 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c new file mode 100644 index 000000000..595213fe1 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + 
+vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c new file mode 100644 index 000000000..9c6fe0e9b --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c new file mode 100644 index 000000000..21b840e2a --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c new file mode 100644 index 000000000..296d2e28d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + 
return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c new file mode 100644 index 000000000..227aa0c7d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c new file mode 100644 index 000000000..74edec47c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, 
size_t vl) { + return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c new file mode 100644 index 000000000..838abfc41 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c new file mode 100644 index 000000000..31bae4be0 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(maskedoff, vs2, 0, vl); +} + diff --git 
a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c new file mode 100644 index 000000000..da2024633 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m4_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m8_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c new file mode 100644 index 000000000..d0b5008ff --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c new file mode 100644 index 000000000..6eae5528a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); +} + 
+vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c new file mode 100644 index 000000000..39900c92e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c new file mode 100644 index 000000000..29c44c80d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1_tu(vd, 
vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c new file mode 100644 index 000000000..48a92787a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c new file mode 100644 index 000000000..6dc0b6dba --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return 
__riscv_vaeskf1_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c new file mode 100644 index 000000000..17b588d02 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m4_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m8_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c new file mode 100644 index 000000000..bfc56949d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c new file mode 100644 index 000000000..3caa6a027 --- 
/dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c new file mode 100644 index 000000000..8c6cca9f9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + diff --git 
a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c new file mode 100644 index 000000000..90a6e891e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c new file mode 100644 index 000000000..6eef057b9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, 
vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c new file mode 100644 index 000000000..a503020a9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c new file mode 100644 index 000000000..2c459fd63 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c new file mode 100644 index 000000000..cbe231c57 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + 
+vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c new file mode 100644 index 000000000..bbd18b8f9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c new file mode 100644 index 000000000..9c8089587 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, 
vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c new file mode 100644 index 000000000..0afd0df05 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c new file mode 100644 index 000000000..91d2cb885 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return 
__riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c new file mode 100644 index 000000000..c37bb0a86 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c new file mode 100644 index 000000000..4f0e78cf9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c new file mode 100644 index 000000000..7cbbb2e4f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return 
__riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + From 0dcec1b2640c14ebb4dcd771c2be69f9769389c7 Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 04:53:47 -0700 Subject: [PATCH 14/44] [vector-crypto] Define intrinsics for the Zvknh[ab] extension Signed-off-by: eop Chen --- .../templates/vector_crypto_template.py | 10 ++++++-- .../rvv_intrinsic_gen/vector_crypto_inst.py | 25 +++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index c7201db9c..eed16e39e 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -36,11 +36,16 @@ operand_mnemonic_dict["vaeskf1"] = ["vi"] operand_mnemonic_dict["vaeskf2"] = ["vi"] operand_mnemonic_dict["vaesz"] = ["vs"] +# Zvkned: NIST Suite: Vector AES Block Cipher +operand_mnemonic_dict["vsha2ms"] = ["vv"] +operand_mnemonic_dict["vsha2ch"] = ["vv"] +operand_mnemonic_dict["vsha2cl"] = ["vv"] def has_vd_input(name): has_vd_input_inst_set = { - "vghsh", "vgmul", "vaesef", "vaesem", "vaesdf", "vaesdm", "vaesz" + "vghsh", "vgmul", "vaesef", "vaesem", "vaesdf", "vaesdm", "vaesz", + "vsha2ms", "vsha2ch", "vsha2cl" } return name in has_vd_input_inst_set @@ -48,7 +53,8 @@ def has_vd_input(name): def has_vs1_input(name): has_vs1_input_inst_set = { - "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh", "vghsh" + "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh", "vghsh", "vsha2ms", + "vsha2ch", "vsha2cl" } return name in has_vs1_input_inst_set diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index f25cc6562..f6b9d63c5 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -135,3 +135,28 @@ def gen(g): decorators.has_no_masking_policy) #################################################################### + + g.start_group("Zvknh - NIST Suite: Vector SHA-2 Secure Hash") + + g.function_group( + vector_crypto_template, + "Vector SHA-2 message schedule", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsha2ms"], + UITYPE, + [32, 64], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector SHA-2 two rounds of compression", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsha2ch", "vsha2cl"], + UITYPE, + [32, 64], + LMULS, + decorators.has_no_masking_policy) + + +#################################################################### From 29386c9b9f462d81c81840ee0a0047ad63506568 Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 11:00:06 -0700 Subject: [PATCH 15/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. 
(make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 41 +++++++++++++++++++ ..._-_nist_suite:_vector_sha-2_secure_hash.md | 41 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 41 +++++++++++++++++++ ..._-_nist_suite:_vector_sha-2_secure_hash.md | 41 +++++++++++++++++++ .../policy_funcs/intrinsic_funcs.md | 41 +++++++++++++++++++ ..._-_nist_suite:_vector_sha-2_secure_hash.md | 41 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 41 +++++++++++++++++++ ..._-_nist_suite:_vector_sha-2_secure_hash.md | 41 +++++++++++++++++++ 8 files changed, 328 insertions(+) create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index f5dd142d2..335b10f14 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -719,3 +719,44 @@ vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: + +### [Vector SHA-2 message schedule](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` + +### [Vector SHA-2 two rounds of compression](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); 
+vuint64m8_t __riscv_vsha2ch_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md b/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md new file mode 100644 index 000000000..90db92cd4 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md @@ -0,0 +1,41 @@ + +## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: + +### [Vector SHA-2 message schedule](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` + +### [Vector SHA-2 two rounds of compression](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); 
+vuint32m1_t __riscv_vsha2cl_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index d4ed46f48..15ed599bd 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -719,3 +719,44 @@ vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: + +### [Vector SHA-2 message schedule](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ms (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` + +### [Vector SHA-2 two rounds of compression](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ch (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl (vuint32m4_t vd, vuint32m4_t 
vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md new file mode 100644 index 000000000..2b8a36920 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md @@ -0,0 +1,41 @@ + +## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: + +### [Vector SHA-2 message schedule](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ms (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` + +### [Vector SHA-2 two rounds of compression](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ch (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl 
(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 673d81711..fb6b81b42 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -1125,3 +1125,44 @@ vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t v vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: + +### [Vector SHA-2 message schedule](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` + +### [Vector SHA-2 two rounds of compression](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t 
vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md new file mode 100644 index 000000000..c6a2a611f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md @@ -0,0 +1,41 @@ + +## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: + +### [Vector SHA-2 message schedule](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` + +### [Vector SHA-2 two rounds of compression](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, 
vuint64m8_t vs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index 3d0d018ff..ff694aef4 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -1125,3 +1125,44 @@ vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: + +### [Vector SHA-2 message schedule](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ms_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` + +### [Vector SHA-2 two rounds of compression](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ch_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md 
b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md new file mode 100644 index 000000000..7f060208e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md @@ -0,0 +1,41 @@ + +## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: + +### [Vector SHA-2 message schedule](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ms_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` + +### [Vector SHA-2 two rounds of compression](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsha2ch_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +``` From 1b179fe2b506ff4ccdd49932e51310e4b9bc0bdd Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 11:00:07 -0700 Subject: [PATCH 16/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. 
(make git-commit-autogen-test) --- .../vector-crypto/api-testing/vsha2ch.c | 42 ++++++++++++++++++ .../vector-crypto/api-testing/vsha2cl.c | 42 ++++++++++++++++++ .../vector-crypto/api-testing/vsha2ms.c | 42 ++++++++++++++++++ .../vector-crypto/llvm-api-tests/vsha2ch.c | 43 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vsha2cl.c | 43 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vsha2ms.c | 43 +++++++++++++++++++ .../llvm-overloaded-tests/vsha2ch.c | 43 +++++++++++++++++++ .../llvm-overloaded-tests/vsha2cl.c | 43 +++++++++++++++++++ .../llvm-overloaded-tests/vsha2ms.c | 43 +++++++++++++++++++ .../overloaded-api-testing/vsha2ch.c | 42 ++++++++++++++++++ .../overloaded-api-testing/vsha2cl.c | 42 ++++++++++++++++++ .../overloaded-api-testing/vsha2ms.c | 42 ++++++++++++++++++ .../policy_funcs/api-testing/vsha2ch.c | 42 ++++++++++++++++++ .../policy_funcs/api-testing/vsha2cl.c | 42 ++++++++++++++++++ .../policy_funcs/api-testing/vsha2ms.c | 42 ++++++++++++++++++ .../policy_funcs/llvm-api-tests/vsha2ch.c | 43 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vsha2cl.c | 43 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vsha2ms.c | 43 +++++++++++++++++++ .../llvm-overloaded-tests/vsha2ch.c | 43 +++++++++++++++++++ .../llvm-overloaded-tests/vsha2cl.c | 43 +++++++++++++++++++ .../llvm-overloaded-tests/vsha2ms.c | 43 +++++++++++++++++++ .../overloaded-api-testing/vsha2ch.c | 42 ++++++++++++++++++ .../overloaded-api-testing/vsha2cl.c | 42 ++++++++++++++++++ .../overloaded-api-testing/vsha2ms.c | 42 ++++++++++++++++++ 24 files changed, 1020 insertions(+) create mode 100644 auto-generated/vector-crypto/api-testing/vsha2ch.c create mode 100644 auto-generated/vector-crypto/api-testing/vsha2cl.c create mode 100644 auto-generated/vector-crypto/api-testing/vsha2ms.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c create mode 100644 
auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c diff --git a/auto-generated/vector-crypto/api-testing/vsha2ch.c b/auto-generated/vector-crypto/api-testing/vsha2ch.c new file mode 100644 index 000000000..8407a75e1 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2ch.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vsha2cl.c b/auto-generated/vector-crypto/api-testing/vsha2cl.c new file mode 100644 index 000000000..e7a37c2e7 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2cl.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + 
return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vsha2ms.c b/auto-generated/vector-crypto/api-testing/vsha2ms.c new file mode 100644 index 000000000..65b6fc728 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2ms.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c new file mode 100644 index 000000000..046495c35 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4(vd, 
vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c new file mode 100644 index 000000000..442946790 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c new file mode 100644 index 000000000..76cf625eb --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t 
test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c new file mode 100644 index 000000000..63d6c5aea --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c new file mode 100644 index 000000000..c16a3b774 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} 
+ +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c new file mode 100644 index 000000000..c795ac036 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c new file mode 100644 index 000000000..e581f6f43 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, 
vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c new file mode 100644 index 000000000..9a839357b --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c new file mode 100644 index 000000000..c6d912d62 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return 
__riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c new file mode 100644 index 000000000..9940b82c2 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c new file mode 100644 index 000000000..11360869d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl); +} + 
+vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c new file mode 100644 index 000000000..b9e9f83b2 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c new file mode 100644 index 000000000..42785c045 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t 
test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c new file mode 100644 index 000000000..d3aa58e49 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c new file mode 100644 index 000000000..1641cffed --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, 
vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c new file mode 100644 index 000000000..d83ec593f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c new file mode 100644 index 
000000000..7f9c2327b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c new file mode 100644 index 000000000..6648d4381 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c @@ -0,0 +1,43 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t 
test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c new file mode 100644 index 000000000..cf1afc07f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c new file mode 100644 index 000000000..a385bfd49 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) 
{ + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c new file mode 100644 index 000000000..ae5e74fff --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c @@ -0,0 +1,42 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + From 295f8ac20954624e4dfb4dcfc9f34799da8eea6a Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 05:02:45 -0700 Subject: [PATCH 17/44] [vector-crypto] Define intrinsics for the Zvksed extension Signed-off-by: eop Chen --- .../rvv_intrinsic_gen/generator.py | 2 +- .../templates/vector_crypto_template.py | 5 +++- .../rvv_intrinsic_gen/vector_crypto_inst.py | 23 +++++++++++++++++++ 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py index 97a92f6b9..8f1653ccb 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py @@ -257,7 +257,7 @@ def get_overloaded_op_name(name): overloaded_name = "_".join([sn[0], sn[1], sn[-1]]) elif any(op in name for op in [ "vzext", "vsext", "vwadd", "vwsub", "vfwadd", "vfwsub", "vwadd", - "vwsub", "vfwadd", "vfwsub", "vmv", "vfmv" + "vwsub", "vfwadd", "vfwsub", "vmv", "vfmv", "vsm4r" ]): # 2.
compiler can not distinguish *.wx and *.vx, need encode them in # suffix, for example: diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index eed16e39e..da21ae67f 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -40,12 +40,15 @@ operand_mnemonic_dict["vsha2ms"] = ["vv"] operand_mnemonic_dict["vsha2ch"] = ["vv"] operand_mnemonic_dict["vsha2cl"] = ["vv"] +# Zvkned: NIST Suite: Vector AES Block Cipher +operand_mnemonic_dict["vsm4k"] = ["vi"] +operand_mnemonic_dict["vsm4r"] = ["vv", "vs"] def has_vd_input(name): has_vd_input_inst_set = { "vghsh", "vgmul", "vaesef", "vaesem", "vaesdf", "vaesdm", "vaesz", - "vsha2ms", "vsha2ch", "vsha2cl" + "vsha2ms", "vsha2ch", "vsha2cl", "vsm4r" } return name in has_vd_input_inst_set diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index f6b9d63c5..47c40eb59 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -158,5 +158,28 @@ def gen(g): LMULS, decorators.has_no_masking_policy) + #################################################################### + + g.start_group("Zvksed - ShangMi Suite: SM4 Block Cipher") + + g.function_group( + vector_crypto_template, + "Vector SM4 KeyExpansion", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsm4k"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector SM4 Rounds", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsm4r"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) #################################################################### From 82cc885d9b0f6a0406f12e0b8046789a4717752e Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 11:00:11 -0700 Subject: [PATCH 18/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. 
(make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 29 +++++++++++++++++++ ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 29 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 29 +++++++++++++++++++ ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 29 +++++++++++++++++++ .../policy_funcs/intrinsic_funcs.md | 29 +++++++++++++++++++ ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 29 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 29 +++++++++++++++++++ ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 29 +++++++++++++++++++ 8 files changed, 232 insertions(+) create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 335b10f14..3a9b4076b 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -760,3 +760,32 @@ vuint64m2_t __riscv_vsha2cl_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2 vuint64m4_t __riscv_vsha2cl_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); ``` + +## Zvksed - ShangMi Suite: SM4 Block Cipher: + +### [Vector SM4 KeyExpansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4k_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector SM4 Rounds](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md new file mode 100644 index 000000000..c78e8cbf2 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -0,0 +1,29 @@ + +## Zvksed - ShangMi Suite: SM4 Block Cipher: + +### [Vector SM4 KeyExpansion](): + +**Prototypes:** +``` C +vuint32mf2_t 
__riscv_vsm4k_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector SM4 Rounds](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 15ed599bd..1eb2b3105 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -760,3 +760,32 @@ vuint64m2_t __riscv_vsha2cl (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, s vuint64m4_t __riscv_vsha2cl (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); ``` + +## Zvksed - ShangMi Suite: SM4 Block Cipher: + +### [Vector SM4 KeyExpansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4k (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector SM4 Rounds](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md new file mode 100644 index 000000000..5e8da0f1a --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ 
-0,0 +1,29 @@ + +## Zvksed - ShangMi Suite: SM4 Block Cipher: + +### [Vector SM4 KeyExpansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4k (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector SM4 Rounds](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index fb6b81b42..1ea845159 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -1166,3 +1166,32 @@ vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint6 vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); ``` + +## Zvksed - ShangMi Suite: SM4 Block Cipher: + +### [Vector SM4 KeyExpansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector SM4 Rounds](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git 
a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md new file mode 100644 index 000000000..7098d3485 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -0,0 +1,29 @@ + +## Zvksed - ShangMi Suite: SM4 Block Cipher: + +### [Vector SM4 KeyExpansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector SM4 Rounds](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index ff694aef4..a612e8d27 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -1166,3 +1166,32 @@ vuint64m2_t __riscv_vsha2cl_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1 vuint64m4_t __riscv_vsha2cl_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); ``` + +## Zvksed - ShangMi Suite: SM4 Block Cipher: + +### [Vector SM4 KeyExpansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4k_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector SM4 Rounds](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t 
vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md new file mode 100644 index 000000000..4f1dce398 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -0,0 +1,29 @@ + +## Zvksed - ShangMi Suite: SM4 Block Cipher: + +### [Vector SM4 KeyExpansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4k_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +``` + +### [Vector SM4 Rounds](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +``` From ba15f1bf19d9f577615096a4b42ef62b18e31139 Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 11:00:12 -0700 Subject: [PATCH 19/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. 
(make git-commit-autogen-test) --- .../vector-crypto/api-testing/vsm4k.c | 26 ++++++++++ .../vector-crypto/api-testing/vsm4r.c | 46 ++++++++++++++++++ .../vector-crypto/llvm-api-tests/vsm4k.c | 27 +++++++++++ .../vector-crypto/llvm-api-tests/vsm4r.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vsm4k.c | 27 +++++++++++ .../llvm-overloaded-tests/vsm4r.c | 47 +++++++++++++++++++ .../overloaded-api-testing/vsm4k.c | 26 ++++++++++ .../overloaded-api-testing/vsm4r.c | 46 ++++++++++++++++++ .../policy_funcs/api-testing/vsm4k.c | 26 ++++++++++ .../policy_funcs/api-testing/vsm4r.c | 46 ++++++++++++++++++ .../policy_funcs/llvm-api-tests/vsm4k.c | 27 +++++++++++ .../policy_funcs/llvm-api-tests/vsm4r.c | 47 +++++++++++++++++++ .../llvm-overloaded-tests/vsm4k.c | 27 +++++++++++ .../llvm-overloaded-tests/vsm4r.c | 47 +++++++++++++++++++ .../overloaded-api-testing/vsm4k.c | 26 ++++++++++ .../overloaded-api-testing/vsm4r.c | 46 ++++++++++++++++++ 16 files changed, 584 insertions(+) create mode 100644 auto-generated/vector-crypto/api-testing/vsm4k.c create mode 100644 auto-generated/vector-crypto/api-testing/vsm4r.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vsm4k.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vsm4r.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c diff --git a/auto-generated/vector-crypto/api-testing/vsm4k.c b/auto-generated/vector-crypto/api-testing/vsm4k.c new file mode 100644 index 000000000..af05ac455 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsm4k.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vsm4r.c b/auto-generated/vector-crypto/api-testing/vsm4r.c new file mode 100644 index 000000000..d2c7e2dc5 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsm4r.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 
float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c new file mode 100644 index 000000000..ed2010cbe --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c new file mode 100644 index 000000000..ace127f46 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t 
test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c new file mode 100644 index 000000000..831212f2f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c new file mode 100644 index 000000000..c081ecfc5 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, 
vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c new file mode 100644 index 000000000..4f0f38cd7 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c new file mode 100644 index 000000000..ef90532c9 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c new file mode 100644 index 000000000..b5ac42cfc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(maskedoff, 
vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m4_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m8_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c new file mode 100644 index 000000000..4f8e84562 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c new file mode 100644 index 000000000..305cb8b84 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m4_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m8_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c 
b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c new file mode 100644 index 000000000..66f972add --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c new file mode 100644 index 000000000..b7587ab32 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c new file mode 100644 index 000000000..a8071d89c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c @@ -0,0 +1,47 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 
-target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c new file mode 100644 index 000000000..bff1c2f7f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c new file mode 100644 index 000000000..14dde00cf --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c @@ -0,0 +1,46 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, 
vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + From da664fb9ea4e5064da5f702d67583e3121f47f08 Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 05:10:27 -0700 Subject: [PATCH 20/44] [vector-crypto] Define intrinsics for the Zvksh extension Signed-off-by: eop Chen --- .../templates/vector_crypto_template.py | 7 ++++-- .../rvv_intrinsic_gen/vector_crypto_inst.py | 25 +++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index da21ae67f..6cab56159 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -43,12 +43,15 @@ # Zvkned: NIST Suite: Vector AES Block Cipher operand_mnemonic_dict["vsm4k"] = ["vi"] operand_mnemonic_dict["vsm4r"] = ["vv", "vs"] +# Zvksh: ShangMi Suite: SM3 Secure Hash +operand_mnemonic_dict["vsm3me"] = ["vv"] +operand_mnemonic_dict["vsm3c"] = ["vi"] def has_vd_input(name): has_vd_input_inst_set = { "vghsh", "vgmul", "vaesef", "vaesem", "vaesdf", "vaesdm", "vaesz", - "vsha2ms", "vsha2ch", "vsha2cl", "vsm4r" + "vsha2ms", "vsha2ch", "vsha2cl", "vsm4r", "vsm3c" } return name in has_vd_input_inst_set @@ -57,7 +60,7 @@ def has_vd_input(name): def has_vs1_input(name): has_vs1_input_inst_set = { "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh", "vghsh", "vsha2ms", - "vsha2ch", "vsha2cl" + "vsha2ch", "vsha2cl", "vsm3me" } return name in has_vs1_input_inst_set diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index 47c40eb59..d02482f37 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -182,4 +182,29 @@ def gen(g): LMULS, decorators.has_no_masking_policy) + #################################################################### + + g.start_group("Zvksh - ShangMi Suite: SM3 Secure Hash") + + g.function_group( + vector_crypto_template, + "Vector SM3 Message Expansion", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsm3me"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector SM3 Message Expansion", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsm3c"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + #################################################################### From 5f9eb8b23ce440492a6199bc026ea3c9e7e829ec Mon Sep 17 00:00:00 2001 From: 
eopXD Date: Mon, 17 Jul 2023 11:00:15 -0700 Subject: [PATCH 21/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. (make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 24 +++++++++++++++++++ ..._zvksh_-_shangmi_suite:_sm3_secure_hash.md | 24 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 24 +++++++++++++++++++ ..._zvksh_-_shangmi_suite:_sm3_secure_hash.md | 24 +++++++++++++++++++ .../policy_funcs/intrinsic_funcs.md | 24 +++++++++++++++++++ ..._zvksh_-_shangmi_suite:_sm3_secure_hash.md | 24 +++++++++++++++++++ .../overloaded_intrinsic_funcs.md | 24 +++++++++++++++++++ ..._zvksh_-_shangmi_suite:_sm3_secure_hash.md | 24 +++++++++++++++++++ 8 files changed, 192 insertions(+) create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 3a9b4076b..8d3b239fb 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -789,3 +789,27 @@ vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvksh - ShangMi Suite: SM3 Secure Hash: + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3me_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +``` + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3c_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md b/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md new file mode 100644 index 000000000..621c42e24 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md @@ -0,0 +1,24 @@ + +## Zvksh - ShangMi Suite: SM3 Secure Hash: + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3me_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, 
size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +``` + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3c_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 1eb2b3105..7db91b67b 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -789,3 +789,27 @@ vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvksh - ShangMi Suite: SM3 Secure Hash: + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3me (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +``` + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3c (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md new file mode 100644 index 000000000..a904879b0 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md @@ -0,0 +1,24 @@ + +## Zvksh - ShangMi Suite: SM3 Secure Hash: + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3me (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +``` + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3c (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c (vuint32m4_t vd, 
vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 1ea845159..1c5b4a77f 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -1195,3 +1195,27 @@ vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t v vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvksh - ShangMi Suite: SM3 Secure Hash: + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +``` + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3c_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md new file mode 100644 index 000000000..afc57afff --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md @@ -0,0 +1,24 @@ + +## Zvksh - ShangMi Suite: SM3 Secure Hash: + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +``` + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3c_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8_tu (vuint32m8_t vd, 
vuint32m8_t vs2, size_t uimm, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index a612e8d27..e0fd11c20 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -1195,3 +1195,27 @@ vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` + +## Zvksh - ShangMi Suite: SM3 Secure Hash: + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3me_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +``` + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3c_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md new file mode 100644 index 000000000..cb93f408d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md @@ -0,0 +1,24 @@ + +## Zvksh - ShangMi Suite: SM3 Secure Hash: + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3me_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +``` + +### [Vector SM3 Message Expansion](): + +**Prototypes:** +``` C +vuint32mf2_t __riscv_vsm3c_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +``` From fe79ff31279656e827675b6f9beb0ed5f3db463b Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 17 Jul 2023 11:00:16 -0700 Subject: [PATCH 22/44] [Auto-gen] Update 
tests under ../auto-generated/vector-crypto. (make git-commit-autogen-test) --- .../vector-crypto/api-testing/vsm3c.c | 26 ++++++++++++++++++ .../vector-crypto/api-testing/vsm3me.c | 26 ++++++++++++++++++ .../vector-crypto/llvm-api-tests/vsm3c.c | 27 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vsm3me.c | 27 +++++++++++++++++++ .../llvm-overloaded-tests/vsm3c.c | 27 +++++++++++++++++++ .../llvm-overloaded-tests/vsm3me.c | 27 +++++++++++++++++++ .../overloaded-api-testing/vsm3c.c | 26 ++++++++++++++++++ .../overloaded-api-testing/vsm3me.c | 26 ++++++++++++++++++ .../policy_funcs/api-testing/vsm3c.c | 26 ++++++++++++++++++ .../policy_funcs/api-testing/vsm3me.c | 26 ++++++++++++++++++ .../policy_funcs/llvm-api-tests/vsm3c.c | 27 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vsm3me.c | 27 +++++++++++++++++++ .../llvm-overloaded-tests/vsm3c.c | 27 +++++++++++++++++++ .../llvm-overloaded-tests/vsm3me.c | 27 +++++++++++++++++++ .../overloaded-api-testing/vsm3c.c | 26 ++++++++++++++++++ .../overloaded-api-testing/vsm3me.c | 26 ++++++++++++++++++ 16 files changed, 424 insertions(+) create mode 100644 auto-generated/vector-crypto/api-testing/vsm3c.c create mode 100644 auto-generated/vector-crypto/api-testing/vsm3me.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vsm3c.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vsm3me.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c diff --git a/auto-generated/vector-crypto/api-testing/vsm3c.c b/auto-generated/vector-crypto/api-testing/vsm3c.c new file mode 100644 index 000000000..6c82dfe7c --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsm3c.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/api-testing/vsm3me.c 
b/auto-generated/vector-crypto/api-testing/vsm3me.c new file mode 100644 index 000000000..5dd3d4007 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsm3me.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c new file mode 100644 index 000000000..1f304271f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c new file mode 100644 index 000000000..ce4673c23 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c new file mode 100644 index 000000000..6bd539576 --- /dev/null 
+++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c new file mode 100644 index 000000000..1c1ad44b4 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c new file mode 100644 index 000000000..7b204cdfc --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c new file mode 100644 index 000000000..60d967f88 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; 
+typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c new file mode 100644 index 000000000..6551ba803 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c new file mode 100644 index 000000000..3df7ce142 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c new file mode 100644 index 000000000..61854b2d6 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S 
-passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c new file mode 100644 index 000000000..f161c3a7c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c new file mode 100644 index 000000000..15fd70fab --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + diff --git 
a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c new file mode 100644 index 000000000..639a153fc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c @@ -0,0 +1,27 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c new file mode 100644 index 000000000..070583ec5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c new file mode 100644 index 000000000..46ddc2d66 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c @@ -0,0 +1,26 @@ +#include +#include + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + 
return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + From 144b587b632d3f72ab7c54d069406a4a924826bc Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 1 Jun 2023 05:38:11 -0700 Subject: [PATCH 23/44] [Makefile] Add the vector crypto generation to golden check in CI Signed-off-by: eop Chen --- rvv-intrinsic-generator/Makefile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/rvv-intrinsic-generator/Makefile b/rvv-intrinsic-generator/Makefile index 31de080e5..c789e419a 100644 --- a/rvv-intrinsic-generator/Makefile +++ b/rvv-intrinsic-generator/Makefile @@ -346,6 +346,15 @@ diff-autogen: $(call check_defined, TEST_DIR, output directory for documents/tests generation) rm -rf ${abspath ${TEST_DIR}} make OUTPUT_DIR=${TEST_DIR} + make EXTRA_FLAG=--gen-vector-crypto OUTPUT_DIR=${TEST_DIR}/vector-crypto + +# Remove redundant folder created for vector crypto. The reason this line is +# needed is because the targets in this Makefile to generate compatible header +# creates a folder in prior before running the script. The vector crypto, +# however, does not need compatible header because it does not exist before +# v0.10. + rm -rf ${TEST_DIR}/vector-crypto/rvv-v0p10-compatible-headers + diff -qr ${TEST_DIR} ${GOLDEN_DIR} ############################################################################### From 852142fa15136b9ab3b06565e7aa8f66affe3041 Mon Sep 17 00:00:00 2001 From: eopXD Date: Mon, 31 Jul 2023 23:46:08 -0700 Subject: [PATCH 24/44] [vector-crypto] Add more variants for 'vs' instructions 'vs' instructions will take the first element group from `vs2`, while `vd` can be other settings of register group. This commit adds extra variants for users to choose whatever suits their need. 
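As a minimal, hand-written sketch (not part of the auto-generated output), a caller could pair a wider `vd` register group with a narrower `vs2` element group through one of the prototypes this change generates, e.g. `__riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl)`; the wrapper name below is made up purely for illustration:

``` C
#include <riscv_vector.h>

// Illustrative only: vd holds an LMUL=4 register group, while vs2 supplies a
// single element group from an LMUL=2 source, as allowed by the new variant.
vuint32m4_t apply_round_zero_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesz_vs_u32m4(vd, vs2, vl);
}
```
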
Signed-off-by: eop Chen --- .../templates/vector_crypto_template.py | 25 +++++++++++++++---- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index 6cab56159..e11704c7a 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -143,10 +143,25 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args["SEW"] = args["WSEW"] args["LMUL"] = args["WLMUL"] - G.func( - inst_info, - name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + - decorator.func_suffix, - **kwargs) + if operand_mnemonic == "vs": + starting_from_lmul_index = lmul_list.index(args["LMUL"]) + # print(starting_from_lmul_index) + for i in range(starting_from_lmul_index, len(lmul_list)): + kwargs["return_type"] =\ + f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t" + kwargs["vd"] = f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t" + kwargs["vs2"] = f"v{args['TYPE']}{args['SEW']}m{args['LMUL']}_t" + args["LMUL"] = lmul_list[i] + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) + else: + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) G.inst_group_epilogue() From dd4f4ac3744c8eeaa7febc1b0725c31002f26d2b Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 00:36:09 -0700 Subject: [PATCH 25/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. (make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 60 +++++++++++++++++++ ...d_-_nist_suite:_vector_aes_block_cipher.md | 50 ++++++++++++++++ ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 10 ++++ .../overloaded_intrinsic_funcs.md | 60 +++++++++++++++++++ ...d_-_nist_suite:_vector_aes_block_cipher.md | 50 ++++++++++++++++ ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 10 ++++ .../policy_funcs/intrinsic_funcs.md | 60 +++++++++++++++++++ ...d_-_nist_suite:_vector_aes_block_cipher.md | 50 ++++++++++++++++ ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 10 ++++ .../overloaded_intrinsic_funcs.md | 60 +++++++++++++++++++ ...d_-_nist_suite:_vector_aes_block_cipher.md | 50 ++++++++++++++++ ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 10 ++++ 12 files changed, 480 insertions(+) diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 8d3b239fb..351cd0e14 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -647,22 +647,42 @@ vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` C vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t 
__riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -673,22 +693,42 @@ vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl) ``` C vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, 
vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -714,9 +754,19 @@ vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); **Prototypes:** ``` C vuint32mf2_t __riscv_vaesz_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); 
+vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -780,12 +830,22 @@ vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); ``` C vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index c059b3516..e38485a6e 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -7,22 +7,42 @@ ``` C vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); 
vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -33,22 +53,42 @@ vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl) ``` C vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4 
(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -74,8 +114,18 @@ vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); **Prototypes:** ``` C vuint32mf2_t __riscv_vaesz_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); 
``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md index c78e8cbf2..e2991d231 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -18,12 +18,22 @@ vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); ``` C vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 7db91b67b..7ad662f5a 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -647,22 +647,42 @@ vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` C vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -673,22 +693,42 @@ vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` C vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t 
vl); vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -714,9 +754,19 @@ vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vs2, size_t uimm, size_t vl); **Prototypes:** ``` C vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -780,12 +830,22 @@ vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl); ``` C vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t 
vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index 3ff935a8c..23825cb8e 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -7,22 +7,42 @@ ``` C vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t 
vs2, size_t vl); vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -33,22 +53,42 @@ vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` C vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm (vuint32m8_t 
vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -74,8 +114,18 @@ vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vs2, size_t uimm, size_t vl); **Prototypes:** ``` C vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md index 5e8da0f1a..cd2448263 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -18,12 +18,22 @@ vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl); ``` C vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git 
a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 1c5b4a77f..4134b5604 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -1053,22 +1053,42 @@ vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t v ``` C vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t 
vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -1079,22 +1099,42 @@ vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t ``` C vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t 
vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -1120,9 +1160,19 @@ vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, **Prototypes:** ``` C vuint32mf2_t __riscv_vaesz_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -1186,12 +1236,22 @@ vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, s ``` C vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git 
a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index 815e0f4ea..5d13a96cf 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -7,22 +7,42 @@ ``` C vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t 
__riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -33,22 +53,42 @@ vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t ``` C vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t 
__riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -74,8 +114,18 @@ vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, **Prototypes:** ``` C vuint32mf2_t __riscv_vaesz_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md index 7098d3485..2de962e21 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -18,12 +18,22 @@ vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, s ``` C vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_tu 
(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index e0fd11c20..f68f11744 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -1053,22 +1053,42 @@ vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` C vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); 
+vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -1079,22 +1099,42 @@ vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` C vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, 
size_t vl); vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -1120,9 +1160,19 @@ vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t u **Prototypes:** ``` C vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -1186,12 +1236,22 @@ vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uim ``` C vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index d91cc9aee..9fc84bc20 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ 
b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -7,22 +7,42 @@ ``` C vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -33,22 +53,42 @@ vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` C vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32mf2_t vs2, 
size_t vl); +vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` @@ -74,8 +114,18 @@ vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t u **Prototypes:** ``` C vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t 
vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md index 4f1dce398..7487356cb 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -18,12 +18,22 @@ vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uim ``` C vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` From 122944b87648b33c4294a6f4293898b2e97c9e26 Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 00:36:12 -0700 Subject: [PATCH 26/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. 
(make git-commit-autogen-test) --- .../vector-crypto/api-testing/vaesdf.c | 40 +++++++++++++++++++ .../vector-crypto/api-testing/vaesdm.c | 40 +++++++++++++++++++ .../vector-crypto/api-testing/vaesef.c | 40 +++++++++++++++++++ .../vector-crypto/api-testing/vaesem.c | 40 +++++++++++++++++++ .../vector-crypto/api-testing/vaesz.c | 40 +++++++++++++++++++ .../vector-crypto/api-testing/vsm4r.c | 40 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaesdf.c | 40 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaesdm.c | 40 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaesef.c | 40 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaesem.c | 40 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vaesz.c | 40 +++++++++++++++++++ .../vector-crypto/llvm-api-tests/vsm4r.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesdf.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesdm.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesef.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesem.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesz.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vsm4r.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesdf.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesdm.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesef.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesem.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesz.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vsm4r.c | 40 +++++++++++++++++++ .../policy_funcs/api-testing/vaesdf.c | 40 +++++++++++++++++++ .../policy_funcs/api-testing/vaesdm.c | 40 +++++++++++++++++++ .../policy_funcs/api-testing/vaesef.c | 40 +++++++++++++++++++ .../policy_funcs/api-testing/vaesem.c | 40 +++++++++++++++++++ .../policy_funcs/api-testing/vaesz.c | 40 +++++++++++++++++++ .../policy_funcs/api-testing/vsm4r.c | 40 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaesdf.c | 40 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaesdm.c | 40 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaesef.c | 40 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaesem.c | 40 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vaesz.c | 40 +++++++++++++++++++ .../policy_funcs/llvm-api-tests/vsm4r.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesdf.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesdm.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesef.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesem.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vaesz.c | 40 +++++++++++++++++++ .../llvm-overloaded-tests/vsm4r.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesdf.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesdm.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesef.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesem.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vaesz.c | 40 +++++++++++++++++++ .../overloaded-api-testing/vsm4r.c | 40 +++++++++++++++++++ 48 files changed, 1920 insertions(+) diff --git a/auto-generated/vector-crypto/api-testing/vaesdf.c b/auto-generated/vector-crypto/api-testing/vaesdf.c index 17cb54972..ec6e8b067 100644 --- a/auto-generated/vector-crypto/api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/api-testing/vaesdf.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdf_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t 
vl) { + return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vaesdm.c b/auto-generated/vector-crypto/api-testing/vaesdm.c index 057d8afd8..dd7a8ab52 100644 --- a/auto-generated/vector-crypto/api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/api-testing/vaesdm.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdm_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t 
test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vaesef.c b/auto-generated/vector-crypto/api-testing/vaesef.c index 3576a1511..3e26be98e 100644 --- a/auto-generated/vector-crypto/api-testing/vaesef.c +++ b/auto-generated/vector-crypto/api-testing/vaesef.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesef_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return 
__riscv_vaesef_vv_u32m4(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vaesem.c b/auto-generated/vector-crypto/api-testing/vaesem.c index 11a17faa0..b47a15900 100644 --- a/auto-generated/vector-crypto/api-testing/vaesem.c +++ b/auto-generated/vector-crypto/api-testing/vaesem.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesem_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vaesz.c b/auto-generated/vector-crypto/api-testing/vaesz.c index 1d1e99eb2..cc4349b45 100644 --- a/auto-generated/vector-crypto/api-testing/vaesz.c +++ b/auto-generated/vector-crypto/api-testing/vaesz.c @@ -8,18 +8,58 @@ vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesz_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t 
test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vsm4r.c b/auto-generated/vector-crypto/api-testing/vsm4r.c index d2c7e2dc5..7c5ff7a51 100644 --- a/auto-generated/vector-crypto/api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/api-testing/vsm4r.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vsm4r_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t 
vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c index f2a136f0e..4c9faed7b 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdf_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); } diff --git 
a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c index 22bc1f416..9cff36983 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdm_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c index e9a5fd0cb..8c7ab8abf 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesef_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, 
size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m4(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c index 707d6ef10..d01b30f7c 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesem_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2(vd, vs2, vl); } @@ -29,6 +57,14 
@@ vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c index f219721ca..aad378dba 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c @@ -9,18 +9,58 @@ vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesz_vs_u32mf2(vd, vs2, vl); } +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c index ace127f46..a37d743e7 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vsm4r_vs_u32mf2(vd, vs2, vl); } 
+vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); } +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); } +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); } +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c index 23d4151c5..b7bd2a7b8 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdf(vd, vs2, vl); } +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m4_t 
test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c index 6769c8d21..c23154f3a 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdm(vd, vs2, vl); } +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } +vuint32m8_t 
test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c index 51a65a3aa..fe2d7fee1 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesef(vd, vs2, vl); } +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c index 4c25db1cc..abedf1d40 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesem(vd, vs2, vl); } +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return 
__riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c index 7f64b61e4..f459e124a 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c @@ -9,18 +9,58 @@ vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesz(vd, vs2, vl); } +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + vuint32m4_t 
test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c index c081ecfc5..7a1d28756 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vsm4r_vs(vd, vs2, vl); } +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c index 968f0f6d0..5dfd28986 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdf(vd, vs2, vl); } +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vaesdf(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf(vd, vs2, vl); +} + vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c index 070daf1cb..6a427cc9a 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdm(vd, vs2, vl); } +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return 
__riscv_vaesdm(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm(vd, vs2, vl); +} + vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c index 33b2f940a..dca8acbbc 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesef(vd, vs2, vl); } +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef(vd, vs2, vl); +} + vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c 
b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c index 33eb27e22..17d8de48b 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesem(vd, vs2, vl); } +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem(vd, vs2, vl); +} + vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c index 21b840e2a..92f09192f 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c @@ -8,18 +8,58 @@ vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesz(vd, vs2, vl); } +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } +vuint32m2_t 
test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c index ef90532c9..95bd0716a 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vsm4r_vs(vd, vs2, vl); } +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, 
vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c index 296d2e28d..2eefcbc01 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdf_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c index 227aa0c7d..97ab441f7 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdm_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, 
vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c index 74edec47c..2bcdbc400 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesef_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c index 838abfc41..0f179040e 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesem_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); 
+} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c index d0b5008ff..4548ca4e0 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c @@ -8,18 +8,58 @@ vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vaesz_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c index 4f8e84562..e12f9028d 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vsm4r_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, 
vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c index 6eae5528a..7b77ff31d 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdf_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return 
__riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c index 39900c92e..b7f84c4b7 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdm_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return 
__riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c index 29c44c80d..c1debf192 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesef_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, 
vs2, vl); +} + vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c index 48a92787a..e5dc8630c 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesem_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c index bfc56949d..8d48a65f2 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c @@ -9,18 +9,58 @@ vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vaesz_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return 
__riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c index 66f972add..245a6ae12 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vsm4r_vs_u32mf2_tu(vd, vs2, vl); } +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); } +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t 
vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); } +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); } +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c index 3caa6a027..b15017b19 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdf_tu(vd, vs2, vl); } +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdf_tu(vd, vs2, vl); } +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdf_tu(vd, vs2, vl); } +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdf_tu(vd, vs2, vl); } +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t 
vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c index 8c6cca9f9..b9933247b 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdm_tu(vd, vs2, vl); } +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdm_tu(vd, vs2, vl); } +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdm_tu(vd, vs2, vl); } +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdm_tu(vd, vs2, vl); } +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c index 90a6e891e..a0c60bc29 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesef_tu(vd, vs2, vl); } +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, 
vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesef_tu(vd, vs2, vl); } +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesef_tu(vd, vs2, vl); } +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesef_tu(vd, vs2, vl); } +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c index 6eef057b9..1cd5624ca 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesem_tu(vd, vs2, vl); } +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesem_tu(vd, vs2, vl); } +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + 
+vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesem_tu(vd, vs2, vl); } +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesem_tu(vd, vs2, vl); } +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c index cbe231c57..e9cd85400 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c @@ -9,18 +9,58 @@ vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vaesz_tu(vd, vs2, vl); } +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c 
b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c index a8071d89c..f8612d784 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c @@ -13,6 +13,22 @@ vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vsm4r_vs_tu(vd, vs2, vl); } +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } @@ -21,6 +37,18 @@ vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } @@ -29,6 +57,14 @@ vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } @@ -37,6 +73,10 @@ vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c index bbd18b8f9..4e39e8e29 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdf_tu(vd, vs2, vl); } +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t 
test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdf_tu(vd, vs2, vl); } +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdf_tu(vd, vs2, vl); } +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdf_tu(vd, vs2, vl); } +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c index 9c8089587..2e1a59bb6 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdm_tu(vd, vs2, vl); } +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdm_tu(vd, vs2, vl); } +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return 
__riscv_vaesdm_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdm_tu(vd, vs2, vl); } +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdm_tu(vd, vs2, vl); } +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c index 0afd0df05..849fc43e6 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesef_tu(vd, vs2, vl); } +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesef_tu(vd, vs2, vl); } +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesef_tu(vd, vs2, vl); } +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesef_tu(vd, vs2, vl); } +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_tu(vd, vs2, vl); +} + vuint32m8_t 
test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c index 91d2cb885..ff158a365 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesem_tu(vd, vs2, vl); } +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesem_tu(vd, vs2, vl); } +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesem_tu(vd, vs2, vl); } +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesem_tu(vd, vs2, vl); } +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c index 7cbbb2e4f..40c8e9cb3 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c @@ -8,18 +8,58 @@ vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vaesz_tu(vd, vs2, vl); } +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); 
+} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c index 14dde00cf..abf418bb9 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c @@ -12,6 +12,22 @@ vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vsm4r_vs_tu(vd, vs2, vl); } +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } @@ -20,6 +36,18 @@ vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } @@ -28,6 +56,14 @@ vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return 
__riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } @@ -36,6 +72,10 @@ vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } From efe7b4cb98776e268acb69d7ab29bdf20421310b Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 00:30:56 -0700 Subject: [PATCH 27/44] [vector-crypto] Document the availability for vector crypto intrinsics regarding zvl extensions Signed-off-by: eop Chen --- vector_crypto_notes.adoc | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 vector_crypto_notes.adoc diff --git a/vector_crypto_notes.adoc b/vector_crypto_notes.adoc new file mode 100644 index 000000000..e9c60396e --- /dev/null +++ b/vector_crypto_notes.adoc @@ -0,0 +1,15 @@ += Note for vector crypto intrinsics + +== Availability of vector crypto intrinsics + +Availability of the vector crypto instruction intrinsics depends on the minimum vector length specified for the architecture via the `Zvl*b` ^0^ sub-extension. The vector length must be at least one EGW (element group width ^1^) long. + +Take the `vaesdf.vs` intrinsic as an example. Since the instruction computes with a single element group provided from `vs2`, the `vuint32mf2_t` operand `vs2` must be at least 128 bits long. Therefore the intrinsic requires `zvl256b` to be available. + +``` +vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +``` + +^0^ https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#181-zvl-minimum-vector-length-standard-extensions[v-spec 18.1. Zvl*: Minimum Vector Length Standard Extensions] + +^1^ https://github.com/riscv/riscv-crypto/blob/master/doc/vector/riscv-crypto-vector-element-groups.adoc[Vector Crypto Specification: Element Groups] From 65b2b980d97e13357027801b60704e1701114793 Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 00:39:06 -0700 Subject: [PATCH 28/44] [vector-crypto] Remove redundant variable in testing function declaration Signed-off-by: eop Chen --- rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py index 8f1653ccb..09f6a86b3 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py @@ -542,6 +542,7 @@ def func(self, inst_info, name, return_type, **kwargs): # For "vxrm" parameter of the fixed-point intrinsics, value for it must be # an immediate. func_decl = func_decl.replace(", unsigned int vxrm", "") + func_decl = func_decl.replace(", size_t uimm", "") # For "frm" parameter of the floating-point intrinsics, value for it must # be an immediate. From 9790b4d6a5baad0a90dd9a0678d7091fc2b243be Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 00:39:37 -0700 Subject: [PATCH 29/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto.
(make git-commit-autogen-test) --- auto-generated/vector-crypto/api-testing/vaeskf1.c | 10 +++++----- auto-generated/vector-crypto/api-testing/vaeskf2.c | 10 +++++----- auto-generated/vector-crypto/api-testing/vsm3c.c | 10 +++++----- auto-generated/vector-crypto/api-testing/vsm4k.c | 10 +++++----- auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c | 10 +++++----- auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c | 10 +++++----- auto-generated/vector-crypto/llvm-api-tests/vsm3c.c | 10 +++++----- auto-generated/vector-crypto/llvm-api-tests/vsm4k.c | 10 +++++----- .../vector-crypto/llvm-overloaded-tests/vaeskf1.c | 10 +++++----- .../vector-crypto/llvm-overloaded-tests/vaeskf2.c | 10 +++++----- .../vector-crypto/llvm-overloaded-tests/vsm3c.c | 10 +++++----- .../vector-crypto/llvm-overloaded-tests/vsm4k.c | 10 +++++----- .../vector-crypto/overloaded-api-testing/vaeskf1.c | 10 +++++----- .../vector-crypto/overloaded-api-testing/vaeskf2.c | 10 +++++----- .../vector-crypto/overloaded-api-testing/vsm3c.c | 10 +++++----- .../vector-crypto/overloaded-api-testing/vsm4k.c | 10 +++++----- .../vector-crypto/policy_funcs/api-testing/vaeskf1.c | 10 +++++----- .../vector-crypto/policy_funcs/api-testing/vaeskf2.c | 10 +++++----- .../vector-crypto/policy_funcs/api-testing/vsm3c.c | 10 +++++----- .../vector-crypto/policy_funcs/api-testing/vsm4k.c | 10 +++++----- .../policy_funcs/llvm-api-tests/vaeskf1.c | 10 +++++----- .../policy_funcs/llvm-api-tests/vaeskf2.c | 10 +++++----- .../vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c | 10 +++++----- .../vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c | 10 +++++----- .../policy_funcs/llvm-overloaded-tests/vaeskf1.c | 10 +++++----- .../policy_funcs/llvm-overloaded-tests/vaeskf2.c | 10 +++++----- .../policy_funcs/llvm-overloaded-tests/vsm3c.c | 10 +++++----- .../policy_funcs/llvm-overloaded-tests/vsm4k.c | 10 +++++----- .../policy_funcs/overloaded-api-testing/vaeskf1.c | 10 +++++----- .../policy_funcs/overloaded-api-testing/vaeskf2.c | 10 +++++----- .../policy_funcs/overloaded-api-testing/vsm3c.c | 10 +++++----- .../policy_funcs/overloaded-api-testing/vsm4k.c | 10 +++++----- 32 files changed, 160 insertions(+), 160 deletions(-) diff --git a/auto-generated/vector-crypto/api-testing/vaeskf1.c b/auto-generated/vector-crypto/api-testing/vaeskf1.c index 3e31056e0..0d55e93ac 100644 --- a/auto-generated/vector-crypto/api-testing/vaeskf1.c +++ b/auto-generated/vector-crypto/api-testing/vaeskf1.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); } diff --git 
a/auto-generated/vector-crypto/api-testing/vaeskf2.c b/auto-generated/vector-crypto/api-testing/vaeskf2.c index 8efafda00..50cf20d1b 100644 --- a/auto-generated/vector-crypto/api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/api-testing/vaeskf2.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32mf2(vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m1(vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m2(vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m4(vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m8(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vsm3c.c b/auto-generated/vector-crypto/api-testing/vsm3c.c index 6c82dfe7c..355f4a519 100644 --- a/auto-generated/vector-crypto/api-testing/vsm3c.c +++ b/auto-generated/vector-crypto/api-testing/vsm3c.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); } -vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl); } -vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl); } -vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl); } -vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vsm4k.c b/auto-generated/vector-crypto/api-testing/vsm4k.c index af05ac455..d038e7157 100644 --- a/auto-generated/vector-crypto/api-testing/vsm4k.c +++ b/auto-generated/vector-crypto/api-testing/vsm4k.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m1(vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, 
size_t uimm, size_t vl) { +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m2(vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m4(vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c index bd2625c66..f35c4a3b2 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c index c83da63c1..036a12b52 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32mf2(vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m1(vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m2(vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m4(vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m8(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c index 1f304271f..87af91e5c 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); } 
-vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl); } -vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl); } -vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl); } -vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c index ed2010cbe..0911bd722 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m1(vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m2(vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m4(vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c index 3fa9b9126..e15daf77d 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c index 7060fdf6e..544e99fef 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c index 6bd539576..416f7a64f 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } -vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } -vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } -vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } -vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c index 831212f2f..319a815f8 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t 
vl) { +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c index 595213fe1..8ec38cde4 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c index 9c6fe0e9b..660b0ba39 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c index 7b204cdfc..a5bdb447f 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } -vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm3c(vd, 
vs2, 0, vl); } -vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } -vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } -vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c index 4f0f38cd7..06728e8dd 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c index 31bae4be0..97339218d 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32mf2_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m2_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m4_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t 
vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m8_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c index da2024633..3fcb9e9b4 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32mf2_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m1_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m2_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m4_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m8_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c index 6551ba803..b0b2246a3 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c index b5ac42cfc..05dc6da60 100644 --- 
a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32mf2_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m2_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m4_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m8_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c index 6dc0b6dba..36bc372bc 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32mf2_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m2_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m4_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m8_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c index 17b588d02..448191a2f 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, 
vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32mf2_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m1_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m2_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m4_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m8_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c index 61854b2d6..e64ed6ab7 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c index 305cb8b84..a702fee72 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32mf2_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return 
__riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m2_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m4_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m8_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c index a503020a9..32fb898ca 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c index 2c459fd63..605ff4a98 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, 
vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c index 15fd70fab..9cbfd29cf 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c index b7587ab32..bd902bfba 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return 
__riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c index c37bb0a86..f531dd6af 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c index 4f0e78cf9..c43fbbb09 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c 
b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c index 070583ec5..b784b6537 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c index bff1c2f7f..e1f938477 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); } From 3c1f66d78695d1bebbca07d297e49f7f80b85386 Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 00:52:32 -0700 Subject: [PATCH 30/44] [vector-crypto] Bug fixes on intrinsic definitions - Add operand mnemonics for overloaded intrinsics of vaesef/vaesem/vaesdf/vaesdm - Add vs2 operand for vaeskf2 - Fix vs2 data type for vwsll --- .../rvv_intrinsic_gen/generator.py | 3 +-
.../templates/vector_crypto_template.py | 34 +++++++++++++------ 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py index 09f6a86b3..4c1d4e117 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py @@ -257,7 +257,8 @@ def get_overloaded_op_name(name): overloaded_name = "_".join([sn[0], sn[1], sn[-1]]) elif any(op in name for op in [ "vzext", "vsext", "vwadd", "vwsub", "vfwadd", "vfwsub", "vwadd", - "vwsub", "vfwadd", "vfwsub", "vmv", "vfmv", "vsm4r" + "vwsub", "vfwadd", "vfwsub", "vmv", "vfmv", "vsm4r", "vaesef", "vaesem", + "vaesdf", "vaesdm" ]): # 2. compiler can not distinguish *.wx and *.vx, need encode them in # suffix, for example: diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index e11704c7a..766ab20f2 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -51,7 +51,7 @@ def has_vd_input(name): has_vd_input_inst_set = { "vghsh", "vgmul", "vaesef", "vaesem", "vaesdf", "vaesdm", "vaesz", - "vsha2ms", "vsha2ch", "vsha2cl", "vsm4r", "vsm3c" + "vsha2ms", "vsha2ch", "vsha2cl", "vsm4r", "vsm3c", "vaeskf2" } return name in has_vd_input_inst_set @@ -114,7 +114,16 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): kwargs["return_type"] = type_helper.wv else: kwargs["return_type"] = type_helper.v - kwargs = {**kwargs, **decorator.mask_args(type_helper.m, type_helper.v)} + if op == "vwsll": + kwargs = { + **kwargs, + **decorator.mask_args(type_helper.m, type_helper.wv) + } + else: + kwargs = { + **kwargs, + **decorator.mask_args(type_helper.m, type_helper.v) + } # If vd is already in the input parameter, we don't need to emit another # parameter when tail policy is TU. if has_vd_input(op): @@ -139,10 +148,6 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): kwargs["vl"] = type_helper.size_t - if op == "vwsll": - args["SEW"] = args["WSEW"] - args["LMUL"] = args["WLMUL"] - if operand_mnemonic == "vs": starting_from_lmul_index = lmul_list.index(args["LMUL"]) # print(starting_from_lmul_index) @@ -158,10 +163,17 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): decorator.func_suffix, **kwargs) else: - G.func( - inst_info, - name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + - decorator.func_suffix, - **kwargs) + if op == "vwsll": + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{WSEW}m{WLMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) + else: + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) G.inst_group_epilogue() From 38fbc31fb82afcd14f87d55f5f3f49374890c9a9 Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 01:00:23 -0700 Subject: [PATCH 31/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. 
(make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 70 ++-- ...r_bit-manipulation_used_in_cryptography.md | 60 +-- ...d_-_nist_suite:_vector_aes_block_cipher.md | 10 +- .../overloaded_intrinsic_funcs.md | 230 +++++------ ...r_bit-manipulation_used_in_cryptography.md | 60 +-- ...d_-_nist_suite:_vector_aes_block_cipher.md | 170 ++++---- .../policy_funcs/intrinsic_funcs.md | 220 +++++----- ...r_bit-manipulation_used_in_cryptography.md | 210 +++++----- ...d_-_nist_suite:_vector_aes_block_cipher.md | 10 +- .../overloaded_intrinsic_funcs.md | 380 +++++++++--------- ...r_bit-manipulation_used_in_cryptography.md | 210 +++++----- ...d_-_nist_suite:_vector_aes_block_cipher.md | 170 ++++---- 12 files changed, 900 insertions(+), 900 deletions(-) diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 351cd0e14..92c87a657 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -518,66 +518,66 @@ vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, **Prototypes:** ``` C vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll_vv_u16m1 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll_vv_u16m2 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll_vv_u16m4 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll_vv_u16m8 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll_vv_u32mf2 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll_vv_u32m1 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll_vv_u32m2 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll_vv_u32m4 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t 
__riscv_vwsll_vv_u32m8 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll_vv_u64m1 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll_vv_u64m2 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll_vv_u64m4 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); 
vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); ``` ## Zvbc - Vector Carryless Multiplication: @@ -742,11 +742,11 @@ vuint32m1_t __riscv_vaeskf1_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf1_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf1_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf1_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); -vuint32mf2_t __riscv_vaeskf2_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); ``` ### [Vector AES round zero](): diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md 
b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md index 80f99bfc5..1778ca313 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -518,64 +518,64 @@ vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, **Prototypes:** ``` C vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll_vv_u16m1 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll_vv_u16m2 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll_vv_u16m4 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll_vv_u16m8 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll_vv_u32mf2 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll_vv_u32m1 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll_vv_u32m2 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll_vv_u32m4 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll_vv_u32m8 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll_vv_u64m1 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll_vv_u64m2 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2 
(vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll_vv_u64m4 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, 
size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); ``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index e38485a6e..d4b9bff68 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -102,11 +102,11 @@ vuint32m1_t __riscv_vaeskf1_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf1_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf1_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf1_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); -vuint32mf2_t __riscv_vaeskf2_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); ``` ### [Vector AES round zero](): diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 7ad662f5a..95d6da89f 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -518,66 +518,66 @@ vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) **Prototypes:** ``` C vuint16mf4_t __riscv_vwsll (vuint8mf8_t 
vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vbool32_t mask, 
vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); ``` ## Zvbc - Vector Carryless Multiplication: @@ -645,92 +645,92 @@ 
vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t 
__riscv_vaesef_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES Decryption](): **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, 
size_t vl); -vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv (vuint32m8_t vd, vuint32m8_t vs2, 
size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES-128 Forward KeySchedule generation](): @@ -742,11 +742,11 @@ vuint32m1_t __riscv_vaeskf1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf1 (vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf1 (vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf1 (vuint32m8_t vs2, size_t uimm, size_t vl); -vuint32mf2_t __riscv_vaeskf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); ``` ### [Vector AES round zero](): diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md index d4d9ea35a..dfe321e52 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -518,64 +518,64 @@ vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) **Prototypes:** ``` C vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t 
vl); -vuint16mf4_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, size_t 
rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); ``` diff --git 
a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index 23825cb8e..53179ca9f 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -5,92 +5,92 @@ **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesef (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vv (vuint32mf2_t vd, vuint32mf2_t 
vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES Decryption](): **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdf (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t 
__riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs 
(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES-128 Forward KeySchedule generation](): @@ -102,11 +102,11 @@ vuint32m1_t __riscv_vaeskf1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf1 (vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf1 (vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf1 (vuint32m8_t vs2, size_t uimm, size_t vl); -vuint32mf2_t __riscv_vaeskf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); ``` ### [Vector AES round zero](): diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 4134b5604..f5d8ad3df 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ 
b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -828,128 +828,128 @@ vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuin **Prototypes:** ``` C vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t maskedoff, 
vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); 
-vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, 
vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t 
__riscv_vwsll_vv_u32m1_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, 
vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t 
__riscv_vwsll_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); 
+vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); ``` ## Zvbc - Vector Carryless Multiplication: @@ -1148,11 +1148,11 @@ vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); -vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, 
size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); ``` ### [Vector AES round zero](): diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md index f5ef93699..0031d9a2d 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -828,126 +828,126 @@ vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuin **Prototypes:** ``` C vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); 
+vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, 
size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t mask, 
vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, 
size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu 
(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, 
vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, 
vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); ``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md 
b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index 5d13a96cf..32e41b5ce 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -102,11 +102,11 @@ vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); -vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); ``` ### [Vector AES round zero](): diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index f68f11744..b84966340 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -828,128 +828,128 @@ vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t v **Prototypes:** ``` C vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll_tu 
(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t 
__riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t 
__riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, 
size_t vl); -vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, 
vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_mu (vbool8_t 
mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, 
vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); ``` ## Zvbc - Vector Carryless Multiplication: @@ -1051,92 +1051,92 @@ vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t 
vl); -vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t 
__riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES Decryption](): **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm_tu 
(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t 
__riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES-128 Forward KeySchedule generation](): @@ -1148,11 +1148,11 @@ vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t u vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); -vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); ``` ### [Vector AES round zero](): diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md index c94663c42..4bcf7ffbd 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -828,126 +828,126 @@ vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t v **Prototypes:** ``` C vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tu 
(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); 
vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, 
size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, 
vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tumu (vbool16_t 
mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t 
__riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t 
__riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); ``` diff --git 
a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index 9fc84bc20..9a64c8e5d 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -5,92 +5,92 @@ **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesef_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t 
vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES Decryption](): **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdf_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t 
__riscv_vaesdf_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32m1_t 
vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES-128 Forward KeySchedule generation](): @@ -102,11 +102,11 @@ vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t u vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); -vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t 
__riscv_vaeskf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); ``` ### [Vector AES round zero](): From 5d64bc5c47a329f02024c4c8d0bb5aa339704b88 Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 01:00:26 -0700 Subject: [PATCH 32/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. (make git-commit-autogen-test) --- .../vector-crypto/api-testing/vaeskf2.c | 20 +- .../vector-crypto/api-testing/vwsll.c | 60 ++--- .../vector-crypto/llvm-api-tests/vaeskf2.c | 20 +- .../vector-crypto/llvm-api-tests/vwsll.c | 60 ++--- .../llvm-overloaded-tests/vaesdf.c | 40 ++-- .../llvm-overloaded-tests/vaesdm.c | 40 ++-- .../llvm-overloaded-tests/vaesef.c | 40 ++-- .../llvm-overloaded-tests/vaesem.c | 40 ++-- .../llvm-overloaded-tests/vaeskf2.c | 20 +- .../llvm-overloaded-tests/vwsll.c | 60 ++--- .../overloaded-api-testing/vaesdf.c | 40 ++-- .../overloaded-api-testing/vaesdm.c | 40 ++-- .../overloaded-api-testing/vaesef.c | 40 ++-- .../overloaded-api-testing/vaesem.c | 40 ++-- .../overloaded-api-testing/vaeskf2.c | 20 +- .../overloaded-api-testing/vwsll.c | 60 ++--- .../policy_funcs/api-testing/vaeskf2.c | 20 +- .../policy_funcs/api-testing/vwsll.c | 210 +++++++++--------- .../policy_funcs/llvm-api-tests/vaeskf2.c | 20 +- .../policy_funcs/llvm-api-tests/vwsll.c | 210 +++++++++--------- .../llvm-overloaded-tests/vaesdf.c | 40 ++-- .../llvm-overloaded-tests/vaesdm.c | 40 ++-- .../llvm-overloaded-tests/vaesef.c | 40 ++-- .../llvm-overloaded-tests/vaesem.c | 40 ++-- .../llvm-overloaded-tests/vaeskf2.c | 20 +- .../llvm-overloaded-tests/vwsll.c | 210 +++++++++--------- .../overloaded-api-testing/vaesdf.c | 40 ++-- .../overloaded-api-testing/vaesdm.c | 40 ++-- .../overloaded-api-testing/vaesef.c | 40 ++-- .../overloaded-api-testing/vaesem.c | 40 ++-- .../overloaded-api-testing/vaeskf2.c | 20 +- .../overloaded-api-testing/vwsll.c | 210 +++++++++--------- 32 files changed, 940 insertions(+), 940 deletions(-) diff --git a/auto-generated/vector-crypto/api-testing/vaeskf2.c b/auto-generated/vector-crypto/api-testing/vaeskf2.c index 50cf20d1b..7509d6775 100644 --- a/auto-generated/vector-crypto/api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/api-testing/vaeskf2.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32mf2(vs2, 0, vl); +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m1(vs2, 0, vl); +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m2(vs2, 0, vl); +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m4(vs2, 0, vl); +vuint32m4_t 
test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m8(vs2, 0, vl); +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vwsll.c b/auto-generated/vector-crypto/api-testing/vwsll.c index a36e5a3c6..270591974 100644 --- a/auto-generated/vector-crypto/api-testing/vwsll.c +++ b/auto-generated/vector-crypto/api-testing/vwsll.c @@ -8,7 +8,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl); } @@ -16,7 +16,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf2(vs2, rs1, vl); } @@ -24,7 +24,7 @@ vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1(vs2, rs1, vl); } @@ -32,7 +32,7 @@ vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2(vs2, rs1, vl); } @@ -40,7 +40,7 @@ vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4(vs2, rs1, vl); } @@ -48,7 +48,7 @@ vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); } @@ -56,7 +56,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2(vs2, rs1, vl); } @@ -64,7 +64,7 @@ vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1(vs2, rs1, vl); } @@ -72,7 +72,7 @@ vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t 
test_vwsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2(vs2, rs1, vl); } @@ -80,7 +80,7 @@ vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4(vs2, rs1, vl); } @@ -88,7 +88,7 @@ vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8(vs2, rs1, vl); } @@ -96,7 +96,7 @@ vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1(vs2, rs1, vl); } @@ -104,7 +104,7 @@ vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2(vs2, rs1, vl); } @@ -112,7 +112,7 @@ vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4(vs2, rs1, vl); } @@ -120,7 +120,7 @@ vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); } @@ -128,7 +128,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t return __riscv_vwsll_vv_u16mf4_m(mask, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_m(mask, vs2, rs1, vl); } @@ -136,7 +136,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t return __riscv_vwsll_vv_u16mf2_m(mask, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf2_m(mask, vs2, rs1, vl); } @@ -144,7 +144,7 @@ vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t v return __riscv_vwsll_vv_u16m1_m(mask, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_m(mask, vs2, rs1, vl); } @@ -152,7 +152,7 @@ vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, return 
__riscv_vwsll_vv_u16m2_m(mask, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_m(mask, vs2, rs1, vl); } @@ -160,7 +160,7 @@ vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, return __riscv_vwsll_vv_u16m4_m(mask, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_m(mask, vs2, rs1, vl); } @@ -168,7 +168,7 @@ vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, return __riscv_vwsll_vv_u16m8_m(mask, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_m(mask, vs2, rs1, vl); } @@ -176,7 +176,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4 return __riscv_vwsll_vv_u32mf2_m(mask, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_m(mask, vs2, rs1, vl); } @@ -184,7 +184,7 @@ vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t return __riscv_vwsll_vv_u32m1_m(mask, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_m(mask, vs2, rs1, vl); } @@ -192,7 +192,7 @@ vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t v return __riscv_vwsll_vv_u32m2_m(mask, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_m(mask, vs2, rs1, vl); } @@ -200,7 +200,7 @@ vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs return __riscv_vwsll_vv_u32m4_m(mask, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_m(mask, vs2, rs1, vl); } @@ -208,7 +208,7 @@ vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs return __riscv_vwsll_vv_u32m8_m(mask, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_m(mask, vs2, rs1, vl); } @@ -216,7 +216,7 @@ vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t return __riscv_vwsll_vv_u64m1_m(mask, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_m(mask, vs2, rs1, vl); } @@ -224,7 +224,7 @@ vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, 
vuint32m1_t v return __riscv_vwsll_vv_u64m2_m(mask, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_m(mask, vs2, rs1, vl); } @@ -232,7 +232,7 @@ vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t v return __riscv_vwsll_vv_u64m4_m(mask, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_m(mask, vs2, rs1, vl); } @@ -240,7 +240,7 @@ vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs return __riscv_vwsll_vv_u64m8_m(mask, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_m(mask, vs2, rs1, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c index 036a12b52..fee669e56 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32mf2(vs2, 0, vl); +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m1(vs2, 0, vl); +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m2(vs2, 0, vl); +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m4(vs2, 0, vl); +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m8(vs2, 0, vl); +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c index ca3fdaa23..70212a2c4 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c @@ -9,7 +9,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl); } @@ -17,7 +17,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, 
size_t vl) { return __riscv_vwsll_vx_u16mf2(vs2, rs1, vl); } @@ -25,7 +25,7 @@ vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1(vs2, rs1, vl); } @@ -33,7 +33,7 @@ vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2(vs2, rs1, vl); } @@ -41,7 +41,7 @@ vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4(vs2, rs1, vl); } @@ -49,7 +49,7 @@ vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); } @@ -57,7 +57,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2(vs2, rs1, vl); } @@ -65,7 +65,7 @@ vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1(vs2, rs1, vl); } @@ -73,7 +73,7 @@ vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2(vs2, rs1, vl); } @@ -81,7 +81,7 @@ vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4(vs2, rs1, vl); } @@ -89,7 +89,7 @@ vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8(vs2, rs1, vl); } @@ -97,7 +97,7 @@ vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1(vs2, rs1, vl); } @@ -105,7 +105,7 @@ vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, 
vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2(vs2, rs1, vl); } @@ -113,7 +113,7 @@ vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4(vs2, rs1, vl); } @@ -121,7 +121,7 @@ vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); } @@ -129,7 +129,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t return __riscv_vwsll_vv_u16mf4_m(mask, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_m(mask, vs2, rs1, vl); } @@ -137,7 +137,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t return __riscv_vwsll_vv_u16mf2_m(mask, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf2_m(mask, vs2, rs1, vl); } @@ -145,7 +145,7 @@ vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t v return __riscv_vwsll_vv_u16m1_m(mask, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_m(mask, vs2, rs1, vl); } @@ -153,7 +153,7 @@ vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, return __riscv_vwsll_vv_u16m2_m(mask, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_m(mask, vs2, rs1, vl); } @@ -161,7 +161,7 @@ vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, return __riscv_vwsll_vv_u16m4_m(mask, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_m(mask, vs2, rs1, vl); } @@ -169,7 +169,7 @@ vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, return __riscv_vwsll_vv_u16m8_m(mask, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_m(mask, vs2, rs1, vl); } @@ -177,7 +177,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4 return __riscv_vwsll_vv_u32mf2_m(mask, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t 
mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_m(mask, vs2, rs1, vl); } @@ -185,7 +185,7 @@ vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t return __riscv_vwsll_vv_u32m1_m(mask, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_m(mask, vs2, rs1, vl); } @@ -193,7 +193,7 @@ vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t v return __riscv_vwsll_vv_u32m2_m(mask, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_m(mask, vs2, rs1, vl); } @@ -201,7 +201,7 @@ vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs return __riscv_vwsll_vv_u32m4_m(mask, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_m(mask, vs2, rs1, vl); } @@ -209,7 +209,7 @@ vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs return __riscv_vwsll_vv_u32m8_m(mask, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_m(mask, vs2, rs1, vl); } @@ -217,7 +217,7 @@ vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t return __riscv_vwsll_vv_u64m1_m(mask, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_m(mask, vs2, rs1, vl); } @@ -225,7 +225,7 @@ vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t v return __riscv_vwsll_vv_u64m2_m(mask, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_m(mask, vs2, rs1, vl); } @@ -233,7 +233,7 @@ vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t v return __riscv_vwsll_vv_u64m4_m(mask, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_m(mask, vs2, rs1, vl); } @@ -241,7 +241,7 @@ vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs return __riscv_vwsll_vv_u64m8_m(mask, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_m(mask, vs2, rs1, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c index b7bd2a7b8..7126fd3d3 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c @@ -6,82 +6,82 @@ #include vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c index c23154f3a..6754c6e31 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c @@ -6,82 +6,82 @@ #include vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c index fe2d7fee1..076dadfed 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c @@ -6,82 +6,82 @@ #include vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c index abedf1d40..cd8da8835 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c @@ -6,82 +6,82 @@ #include vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c index 544e99fef..40dee84d2 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c
+++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c
@@ -5,23 +5,23 @@
 
 #include
 
-vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vaeskf2(vs2, 0, vl);
+vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
 }
 
-vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t vl) {
-  return __riscv_vaeskf2(vs2, 0, vl);
+vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
 }
 
-vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t vl) {
-  return __riscv_vaeskf2(vs2, 0, vl);
+vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
 }
 
-vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t vl) {
-  return __riscv_vaeskf2(vs2, 0, vl);
+vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
 }
 
-vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t vl) {
-  return __riscv_vaeskf2(vs2, 0, vl);
+vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
 }
 
diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c
index f739b1cd3..ab2309ab1 100644
--- a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c
+++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c
@@ -9,7 +9,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
   return __riscv_vwsll(vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vwsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) {
+vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll(vs2, rs1, vl);
 }
 
@@ -17,7 +17,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
   return __riscv_vwsll(vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vwsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) {
+vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll(vs2, rs1, vl);
 }
 
@@ -25,7 +25,7 @@ vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
   return __riscv_vwsll(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) {
+vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll(vs2, rs1, vl);
 }
 
@@ -33,7 +33,7 @@ vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
   return __riscv_vwsll(vs2, vs1, vl);
 }
 
-vuint16m2_t test_vwsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) {
+vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll(vs2, rs1, vl);
 }
 
@@ -41,7 +41,7 @@ vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
   return __riscv_vwsll(vs2, vs1, vl);
 }
 
-vuint16m4_t test_vwsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) {
+vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll(vs2, rs1, vl);
 }
 
@@ -49,7 +49,7 @@ vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
   return __riscv_vwsll(vs2, vs1, vl);
 }
 
-vuint16m8_t test_vwsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) {
+vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll(vs2, rs1, vl);
 }
 
@@ -57,7
+57,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) return __riscv_vwsll(vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -65,7 +65,7 @@ vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -73,7 +73,7 @@ vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -81,7 +81,7 @@ vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -89,7 +89,7 @@ vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -97,7 +97,7 @@ vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -105,7 +105,7 @@ vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -113,7 +113,7 @@ vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -121,7 +121,7 @@ vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -129,7 +129,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -137,7 +137,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t 
test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -145,7 +145,7 @@ vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t v return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -153,7 +153,7 @@ vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -161,7 +161,7 @@ vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -169,7 +169,7 @@ vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -177,7 +177,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4 return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -185,7 +185,7 @@ vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -193,7 +193,7 @@ vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t v return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -201,7 +201,7 @@ vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -209,7 +209,7 @@ vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -217,7 +217,7 @@ vuint64m1_t 
test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -225,7 +225,7 @@ vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t v return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -233,7 +233,7 @@ vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t v return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -241,7 +241,7 @@ vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c index 5dfd28986..a0bf2bc63 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c @@ -5,82 +5,82 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) 
{ - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vv(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf(vd, vs2, vl); + return __riscv_vaesdf_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c index 6a427cc9a..bb7253273 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c @@ -5,82 +5,82 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t 
test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vv(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm(vd, vs2, vl); + return __riscv_vaesdm_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c index dca8acbbc..df69a7db4 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c @@ -5,82 +5,82 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, 
vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vv(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef(vd, vs2, vl); + return __riscv_vaesef_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c index 17d8de48b..89631199e 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c @@ -5,82 +5,82 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, 
vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vv(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem(vd, vs2, vl); + return __riscv_vaesem_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c index 660b0ba39..94ff06c1a 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf2(vs2, 0, vl); +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf2(vs2, 0, vl); +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf2(vs2, 0, vl); +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf2(vs2, 0, vl); +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf2(vs2, 0, vl); +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c index c0e0521ff..f90328a94 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c +++ 
b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c @@ -8,7 +8,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -16,7 +16,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -24,7 +24,7 @@ vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -32,7 +32,7 @@ vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -40,7 +40,7 @@ vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -48,7 +48,7 @@ vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -56,7 +56,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) return __riscv_vwsll(vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -64,7 +64,7 @@ vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -72,7 +72,7 @@ vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -80,7 +80,7 @@ vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -88,7 +88,7 @@ vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t 
test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -96,7 +96,7 @@ vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -104,7 +104,7 @@ vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -112,7 +112,7 @@ vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -120,7 +120,7 @@ vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } @@ -128,7 +128,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -136,7 +136,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -144,7 +144,7 @@ vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t v return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -152,7 +152,7 @@ vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -160,7 +160,7 @@ vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -168,7 +168,7 @@ vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t 
test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -176,7 +176,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4 return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -184,7 +184,7 @@ vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -192,7 +192,7 @@ vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t v return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -200,7 +200,7 @@ vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -208,7 +208,7 @@ vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -216,7 +216,7 @@ vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -224,7 +224,7 @@ vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t v return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -232,7 +232,7 @@ vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t v return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } @@ -240,7 +240,7 @@ vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs return __riscv_vwsll(mask, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(mask, vs2, rs1, vl); } diff --git 
a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c
index 3fcb9e9b4..2451093d1 100644
--- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c
@@ -4,23 +4,23 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vaeskf2_vi_u32mf2_tu(maskedoff, vs2, 0, vl);
+vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl);
 }
 
-vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vaeskf2_vi_u32m1_tu(maskedoff, vs2, 0, vl);
+vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m1_tu(vd, vs2, 0, vl);
 }
 
-vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vaeskf2_vi_u32m2_tu(maskedoff, vs2, 0, vl);
+vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m2_tu(vd, vs2, 0, vl);
 }
 
-vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vaeskf2_vi_u32m4_tu(maskedoff, vs2, 0, vl);
+vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl);
 }
 
-vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vaeskf2_vi_u32m8_tu(maskedoff, vs2, 0, vl);
+vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl);
 }
 
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c
index a99acee03..ca8992376 100644
--- a/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c
@@ -8,7 +8,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vu
   return __riscv_vwsll_vv_u16mf4_tu(maskedoff, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll_vx_u16mf4_tu(maskedoff, vs2, rs1, vl);
 }
 
@@ -16,7 +16,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vu
   return __riscv_vwsll_vv_u16mf2_tu(maskedoff, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll_vx_u16mf2_tu(maskedoff, vs2, rs1, vl);
 }
 
@@ -24,7 +24,7 @@ vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint
   return __riscv_vwsll_vv_u16m1_tu(maskedoff, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) {
+vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) {
   return __riscv_vwsll_vx_u16m1_tu(maskedoff, vs2, rs1, vl);
 }
 
@@ -32,7 +32,7 @@ vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t
vs2, vuint8 return __riscv_vwsll_vv_u16m2_tu(maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_tu(maskedoff, vs2, rs1, vl); } @@ -40,7 +40,7 @@ vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8 return __riscv_vwsll_vv_u16m4_tu(maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_tu(maskedoff, vs2, rs1, vl); } @@ -48,7 +48,7 @@ vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8 return __riscv_vwsll_vv_u16m8_tu(maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_tu(maskedoff, vs2, rs1, vl); } @@ -56,7 +56,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, v return __riscv_vwsll_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); } @@ -64,7 +64,7 @@ vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuin return __riscv_vwsll_vv_u32m1_tu(maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_tu(maskedoff, vs2, rs1, vl); } @@ -72,7 +72,7 @@ vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint return __riscv_vwsll_vv_u32m2_tu(maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_tu(maskedoff, vs2, rs1, vl); } @@ -80,7 +80,7 @@ vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint return __riscv_vwsll_vv_u32m4_tu(maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_tu(maskedoff, vs2, rs1, vl); } @@ -88,7 +88,7 @@ vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint return __riscv_vwsll_vv_u32m8_tu(maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_tu(maskedoff, vs2, rs1, vl); } @@ -96,7 +96,7 @@ vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuin return __riscv_vwsll_vv_u64m1_tu(maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, 
size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_tu(maskedoff, vs2, rs1, vl); } @@ -104,7 +104,7 @@ vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint return __riscv_vwsll_vv_u64m2_tu(maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_tu(maskedoff, vs2, rs1, vl); } @@ -112,7 +112,7 @@ vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint return __riscv_vwsll_vv_u64m4_tu(maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_tu(maskedoff, vs2, rs1, vl); } @@ -120,367 +120,367 @@ vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint return __riscv_vwsll_vv_u64m8_tu(maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_tu(maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, 
vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, 
vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t 
vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t 
test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) 
{ +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) 
{ return __riscv_vwsll_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { 
+vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); } 
-vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c index 448191a2f..16b8eaa79 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m1_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m2_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m4_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf2_vi_u32m8_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c index 21b1bc7e8..f9415d873 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c @@ -9,7 +9,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vu return __riscv_vwsll_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); } @@ -17,7 +17,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t 
maskedoff, vuint8mf4_t vs2, vu return __riscv_vwsll_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); } @@ -25,7 +25,7 @@ vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint return __riscv_vwsll_vv_u16m1_tu(maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_tu(maskedoff, vs2, rs1, vl); } @@ -33,7 +33,7 @@ vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8 return __riscv_vwsll_vv_u16m2_tu(maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_tu(maskedoff, vs2, rs1, vl); } @@ -41,7 +41,7 @@ vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8 return __riscv_vwsll_vv_u16m4_tu(maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_tu(maskedoff, vs2, rs1, vl); } @@ -49,7 +49,7 @@ vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8 return __riscv_vwsll_vv_u16m8_tu(maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_tu(maskedoff, vs2, rs1, vl); } @@ -57,7 +57,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, v return __riscv_vwsll_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); } @@ -65,7 +65,7 @@ vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuin return __riscv_vwsll_vv_u32m1_tu(maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_tu(maskedoff, vs2, rs1, vl); } @@ -73,7 +73,7 @@ vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint return __riscv_vwsll_vv_u32m2_tu(maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_tu(maskedoff, vs2, rs1, vl); } @@ -81,7 +81,7 @@ vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint return __riscv_vwsll_vv_u32m4_tu(maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, 
vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_tu(maskedoff, vs2, rs1, vl); } @@ -89,7 +89,7 @@ vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint return __riscv_vwsll_vv_u32m8_tu(maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_tu(maskedoff, vs2, rs1, vl); } @@ -97,7 +97,7 @@ vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuin return __riscv_vwsll_vv_u64m1_tu(maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_tu(maskedoff, vs2, rs1, vl); } @@ -105,7 +105,7 @@ vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint return __riscv_vwsll_vv_u64m2_tu(maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_tu(maskedoff, vs2, rs1, vl); } @@ -113,7 +113,7 @@ vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint return __riscv_vwsll_vv_u64m4_tu(maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_tu(maskedoff, vs2, rs1, vl); } @@ -121,367 +121,367 @@ vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint return __riscv_vwsll_vv_u64m8_tu(maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_tu(maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return 
__riscv_vwsll_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, 
size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return 
__riscv_vwsll_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, 
vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t 
mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4_mu(mask, 
maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t 
test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); } 
-vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c index b15017b19..765042dee 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c @@ -6,82 +6,82 @@ #include vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return 
__riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c index b9933247b..0b7cc7547 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c @@ -6,82 +6,82 @@ #include vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - 
return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c index a0c60bc29..236015c96 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c @@ -6,82 +6,82 @@ #include vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t 
test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c index 1cd5624ca..866bf2914 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c @@ -6,82 +6,82 @@ #include vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return 
__riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c index 605ff4a98..9526a18de 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c @@ -5,23 +5,23 @@ #include -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); 
+vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c index 6f9409182..dbe38e613 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c @@ -9,7 +9,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vu return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -17,7 +17,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vu return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -25,7 +25,7 @@ vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -33,7 +33,7 @@ vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8 return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -41,7 +41,7 @@ vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8 return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -49,7 +49,7 @@ vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8 return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, 
vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -57,7 +57,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, v return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -65,7 +65,7 @@ vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuin return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -73,7 +73,7 @@ vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -81,7 +81,7 @@ vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -89,7 +89,7 @@ vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -97,7 +97,7 @@ vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuin return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -105,7 +105,7 @@ vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -113,7 +113,7 @@ vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -121,367 +121,367 @@ vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint 
return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { 
return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, 
vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t 
test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t 
vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t 
vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t 
maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c index 4e39e8e29..51819499f 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c @@ -5,82 +5,82 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return 
__riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vv_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_tu(vd, vs2, vl); + return __riscv_vaesdf_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c index 2e1a59bb6..ae17f9b58 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c @@ -5,82 +5,82 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return 
__riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vv_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_tu(vd, vs2, vl); + return __riscv_vaesdm_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c index 849fc43e6..a46ede689 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c @@ -5,82 +5,82 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; vuint32mf2_t 
test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vv_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_tu(vd, vs2, vl); + return __riscv_vaesef_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c index 
ff158a365..a0930f52b 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c @@ -5,82 +5,82 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vv_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t 
vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_tu(vd, vs2, vl); + return __riscv_vaesem_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c index c43fbbb09..ef989068e 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c @@ -4,23 +4,23 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf2_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c index 76d9f8828..e316013f9 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c @@ -8,7 +8,7 @@ vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vu return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -16,7 +16,7 @@ vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vu return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -24,7 +24,7 @@ vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -32,7 +32,7 @@ 
vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8 return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -40,7 +40,7 @@ vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8 return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -48,7 +48,7 @@ vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8 return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -56,7 +56,7 @@ vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, v return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -64,7 +64,7 @@ vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuin return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -72,7 +72,7 @@ vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -80,7 +80,7 @@ vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -88,7 +88,7 @@ vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -96,7 +96,7 @@ vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuin return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t 
vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -104,7 +104,7 @@ vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -112,7 +112,7 @@ vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } @@ -120,367 +120,367 @@ vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { 
+vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t 
test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t 
vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t 
test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint8m1_t 
maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, 
vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); } From f96638c631bdba3865d0f5b13ce5a9ed5e9f7293 
Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 18:33:26 -0700 Subject: [PATCH 33/44] [vector-crypto] Append vs2 type in function name of vs variants of vaesef/vaesem/vaesdf/vaesdm Signed-off-by: eop Chen --- .../templates/vector_crypto_template.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py index 766ab20f2..54f34edf4 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -156,12 +156,10 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t" kwargs["vd"] = f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t" kwargs["vs2"] = f"v{args['TYPE']}{args['SEW']}m{args['LMUL']}_t" - args["LMUL"] = lmul_list[i] - G.func( - inst_info, - name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + - decorator.func_suffix, - **kwargs) + func_name = "{OP}_{MNEMONIC}_".format_map(args) +\ + f"{args['TYPE']}{args['SEW']}m{args['LMUL']}_" +\ + f"{args['TYPE']}{args['SEW']}m{lmul_list[i]}" + G.func(inst_info, name=func_name + decorator.func_suffix, **kwargs) else: if op == "vwsll": G.func( From 062975c50261fb59f1eb1dea5f3fa874e9f33f6f Mon Sep 17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 18:34:18 -0700 Subject: [PATCH 34/44] [Auto-gen] Update documents under ../auto-generated/vector-crypto. (make git-commit-autogen-doc) --- .../vector-crypto/intrinsic_funcs.md | 180 +++++++++--------- ...d_-_nist_suite:_vector_aes_block_cipher.md | 150 +++++++-------- ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 30 +-- .../overloaded_intrinsic_funcs.md | 72 +++---- ...d_-_nist_suite:_vector_aes_block_cipher.md | 60 +++--- ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 12 +- .../policy_funcs/intrinsic_funcs.md | 180 +++++++++--------- ...d_-_nist_suite:_vector_aes_block_cipher.md | 150 +++++++-------- ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 30 +-- .../overloaded_intrinsic_funcs.md | 72 +++---- ...d_-_nist_suite:_vector_aes_block_cipher.md | 60 +++--- ...vksed_-_shangmi_suite:_sm4_block_cipher.md | 12 +- 12 files changed, 504 insertions(+), 504 deletions(-) diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 92c87a657..b5690f43c 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -646,45 +646,45 @@ vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); **Prototypes:** ``` C vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesef_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t
__riscv_vaesef_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesem_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```

### [Vector AES Decryption]():

@@ -692,45 +692,45 @@ vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl)
**Prototypes:**
``` C
vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesdf_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdf_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdf_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesdm_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdm_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdm_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```

### [Vector AES-128 Forward KeySchedule generation]():

@@ -753,21 +753,21 @@ vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t ui
**Prototypes:**
``` C
-vuint32mf2_t __riscv_vaesz_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```

## Zvknh - NIST Suite: Vector SHA-2 Secure Hash:

@@ -829,25 +829,25 @@ vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl);
**Prototypes:**
``` C
vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vsm4r_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vsm4r_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vsm4r_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```

## Zvksh - ShangMi Suite: SM3 Secure Hash:

diff --git a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md
index d4b9bff68..5a9f440a2 100644
--- a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md
+++ b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md
@@ -6,45 +6,45 @@
**Prototypes:**
``` C
vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesef_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesef_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesem_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesem_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```

### [Vector AES Decryption]():

@@ -52,45 +52,45 @@ vuint32m8_t __riscv_vaesem_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl)
**Prototypes:**
``` C
vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesdf_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdf_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdf_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdf_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesdm_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdm_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdm_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdm_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdm_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```

### [Vector AES-128 Forward KeySchedule generation]():

@@ -113,19 +113,19 @@ vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t ui
**Prototypes:**
``` C
-vuint32mf2_t __riscv_vaesz_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesz_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesz_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```
diff --git a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md
index e2991d231..ad5aeec27 100644
--- a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md
+++ b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md
@@ -17,23 +17,23 @@ vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl);
**Prototypes:**
``` C
vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vsm4r_vs_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vsm4r_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vsm4r_vs_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vsm4r_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vsm4r_vs_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```
diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md
index 95d6da89f..6906c44bd 100644
--- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md
+++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md
@@ -648,18 +648,18 @@ vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesef_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesef_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
@@ -668,18 +668,18 @@ vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesem_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesem_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
@@ -694,18 +694,18 @@ vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdf_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdf_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdf_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdf_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdf_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
@@ -714,18 +714,18 @@ vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdm_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdm_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdm_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdm_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdm_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
@@ -755,16 +755,16 @@ vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_
``` C
vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
@@ -831,18 +831,18 @@ vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl);
vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vsm4r_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vsm4r_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md
index 53179ca9f..b750c129f 100644
--- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md
+++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md
@@ -8,18 +8,18 @@
vuint32mf2_t __riscv_vaesef_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesef_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
@@ -28,18 +28,18 @@ vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesem_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesem_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
@@ -54,18 +54,18 @@ vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdf_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdf_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdf_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdf_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdf_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
@@ -74,18 +74,18 @@ vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdm_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesdm_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdm_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdm_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdm_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
@@ -115,16 +115,16 @@ vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_
``` C
vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md
index cd2448263..8b9eb1b2a 100644
--- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md
+++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md
@@ -19,18 +19,18 @@ vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl);
vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vsm4r_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vsm4r_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md
index f5d8ad3df..84544a85a 100644
--- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md
+++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md
@@ -1052,45 +1052,45 @@ vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t v
**Prototypes:**
``` C
vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesef_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesef_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesef_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesem_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vaesem_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vaesem_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
-vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
```

### [Vector AES Decryption]():

@@ -1098,45 +1098,45 @@ vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t
**Prototypes:**
``` C
vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32mf2_t __riscv_vaesdf_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
-vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
-vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2_tu (vuint32m2_t vd, 
vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES-128 Forward KeySchedule generation](): @@ -1159,21 +1159,21 @@ vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesz_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); 
-vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: @@ -1235,25 +1235,25 @@ vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, s **Prototypes:** ``` C vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vsm4r_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8_tu (vuint32m8_t 
vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ## Zvksh - ShangMi Suite: SM3 Secure Hash: diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index 32e41b5ce..978cdee59 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -6,45 +6,45 @@ **Prototypes:** ``` C vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesef_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesef_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t 
__riscv_vaesef_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesem_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesem_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); 
-vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES Decryption](): @@ -52,45 +52,45 @@ vuint32m8_t __riscv_vaesem_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t **Prototypes:** ``` C vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdf_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdf_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); 
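// Illustrative sketch only, not part of the auto-generated listing: with the
// renamed .vs intrinsics shown in this hunk, the suffix now spells out
// <vs2 LMUL>_<vd LMUL>_<policy>, so both the element group held in vs2 and the
// destination register group are explicit in the name. Assuming a toolchain
// that implements Zvkned together with these renamed tail-undisturbed (_tu)
// forms, applying a single m1 round-key group to an m4 state group reads:
#include <riscv_vector.h>

static vuint32m4_t aesdf_round_tu(vuint32m4_t state, vuint32m1_t round_key,
                                  size_t vl) {
  // vs2 is u32m1, vd is u32m4; tail elements of the destination stay
  // undisturbed per the _tu policy.
  return __riscv_vaesdf_vs_u32m1_u32m4_tu(state, round_key, vl);
}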
+vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vaesdm_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vaesdm_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t 
vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` ### [Vector AES-128 Forward KeySchedule generation](): @@ -113,19 +113,19 @@ vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t **Prototypes:** ``` C -vuint32mf2_t __riscv_vaesz_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m1_t __riscv_vaesz_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m2_t __riscv_vaesz_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md index 2de962e21..49419391a 100644 --- 
a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -17,23 +17,23 @@ vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, s **Prototypes:** ``` C vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32mf2_t __riscv_vsm4r_vs_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m1_t __riscv_vsm4r_vs_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m2_t __riscv_vsm4r_vs_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); ``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md 
b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index b84966340..a1769992b 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -1054,18 +1054,18 @@ vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); @@ -1074,18 +1074,18 @@ vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t 
vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); @@ -1100,18 +1100,18 @@ vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); @@ -1120,18 +1120,18 @@ vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu 
(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); @@ -1161,16 +1161,16 @@ vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, si ``` C vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); @@ -1237,18 +1237,18 @@ vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uim vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t 
__riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md index 9a64c8e5d..6b16a5a48 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md @@ -8,18 +8,18 @@ vuint32mf2_t __riscv_vaesef_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, 
size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); @@ -28,18 +28,18 @@ vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); @@ -54,18 +54,18 @@ vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t 
vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); @@ -74,18 +74,18 @@ vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); @@ -115,16 +115,16 @@ vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, si ``` C vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); 
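// Illustrative sketch only, not part of the auto-generated listing: the
// overloaded _tu spelling is resolved from the static types of vd and vs2, so
// one name covers every <vs2 LMUL, vd LMUL> pair in this list. Assuming a
// toolchain that implements Zvkned and these overloaded policy forms, the call
// below corresponds to the non-overloaded __riscv_vaesz_vs_u32m1_u32m4_tu
// prototype shown earlier in this patch:
#include <riscv_vector.h>

static vuint32m4_t aesz_tu_example(vuint32m4_t vd, vuint32m1_t round_key,
                                   size_t vl) {
  // vs2 = u32m1, vd = u32m4; overload resolution picks the matching pair.
  return __riscv_vaesz_tu(vd, round_key, vl);
}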
vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md index 7487356cb..3129cb528 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md @@ -19,18 +19,18 @@ vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uim vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); -vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); -vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); From 8a5d49a85e28063da6edee3d36aabfab1002daef Mon Sep 
17 00:00:00 2001 From: eopXD Date: Tue, 1 Aug 2023 18:34:20 -0700 Subject: [PATCH 35/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. (make git-commit-autogen-test) --- .../vector-crypto/api-testing/vaesdf.c | 60 +++++++++---------- .../vector-crypto/api-testing/vaesdm.c | 60 +++++++++---------- .../vector-crypto/api-testing/vaesef.c | 60 +++++++++---------- .../vector-crypto/api-testing/vaesem.c | 60 +++++++++---------- .../vector-crypto/api-testing/vaesz.c | 60 +++++++++---------- .../vector-crypto/api-testing/vsm4r.c | 60 +++++++++---------- .../vector-crypto/llvm-api-tests/vaesdf.c | 60 +++++++++---------- .../vector-crypto/llvm-api-tests/vaesdm.c | 60 +++++++++---------- .../vector-crypto/llvm-api-tests/vaesef.c | 60 +++++++++---------- .../vector-crypto/llvm-api-tests/vaesem.c | 60 +++++++++---------- .../vector-crypto/llvm-api-tests/vaesz.c | 60 +++++++++---------- .../vector-crypto/llvm-api-tests/vsm4r.c | 60 +++++++++---------- .../llvm-overloaded-tests/vaesdf.c | 30 +++++----- .../llvm-overloaded-tests/vaesdm.c | 30 +++++----- .../llvm-overloaded-tests/vaesef.c | 30 +++++----- .../llvm-overloaded-tests/vaesem.c | 30 +++++----- .../llvm-overloaded-tests/vaesz.c | 30 +++++----- .../llvm-overloaded-tests/vsm4r.c | 30 +++++----- .../overloaded-api-testing/vaesdf.c | 30 +++++----- .../overloaded-api-testing/vaesdm.c | 30 +++++----- .../overloaded-api-testing/vaesef.c | 30 +++++----- .../overloaded-api-testing/vaesem.c | 30 +++++----- .../overloaded-api-testing/vaesz.c | 30 +++++----- .../overloaded-api-testing/vsm4r.c | 30 +++++----- .../policy_funcs/api-testing/vaesdf.c | 60 +++++++++---------- .../policy_funcs/api-testing/vaesdm.c | 60 +++++++++---------- .../policy_funcs/api-testing/vaesef.c | 60 +++++++++---------- .../policy_funcs/api-testing/vaesem.c | 60 +++++++++---------- .../policy_funcs/api-testing/vaesz.c | 60 +++++++++---------- .../policy_funcs/api-testing/vsm4r.c | 60 +++++++++---------- .../policy_funcs/llvm-api-tests/vaesdf.c | 60 +++++++++---------- .../policy_funcs/llvm-api-tests/vaesdm.c | 60 +++++++++---------- .../policy_funcs/llvm-api-tests/vaesef.c | 60 +++++++++---------- .../policy_funcs/llvm-api-tests/vaesem.c | 60 +++++++++---------- .../policy_funcs/llvm-api-tests/vaesz.c | 60 +++++++++---------- .../policy_funcs/llvm-api-tests/vsm4r.c | 60 +++++++++---------- .../llvm-overloaded-tests/vaesdf.c | 30 +++++----- .../llvm-overloaded-tests/vaesdm.c | 30 +++++----- .../llvm-overloaded-tests/vaesef.c | 30 +++++----- .../llvm-overloaded-tests/vaesem.c | 30 +++++----- .../llvm-overloaded-tests/vaesz.c | 30 +++++----- .../llvm-overloaded-tests/vsm4r.c | 30 +++++----- .../overloaded-api-testing/vaesdf.c | 30 +++++----- .../overloaded-api-testing/vaesdm.c | 30 +++++----- .../overloaded-api-testing/vaesef.c | 30 +++++----- .../overloaded-api-testing/vaesem.c | 30 +++++----- .../overloaded-api-testing/vaesz.c | 30 +++++----- .../overloaded-api-testing/vsm4r.c | 30 +++++----- 48 files changed, 1080 insertions(+), 1080 deletions(-) diff --git a/auto-generated/vector-crypto/api-testing/vaesdf.c b/auto-generated/vector-crypto/api-testing/vaesdf.c index ec6e8b067..fac9c44ee 100644 --- a/auto-generated/vector-crypto/api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/api-testing/vaesdf.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32mf2(vd, 
vs2, vl); +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, 
vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vaesdm.c b/auto-generated/vector-crypto/api-testing/vaesdm.c index dd7a8ab52..17261e874 100644 --- a/auto-generated/vector-crypto/api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/api-testing/vaesdm.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vaesef.c b/auto-generated/vector-crypto/api-testing/vaesef.c index 3e26be98e..683ac6669 100644 --- a/auto-generated/vector-crypto/api-testing/vaesef.c +++ b/auto-generated/vector-crypto/api-testing/vaesef.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t 
test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return 
__riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vaesem.c b/auto-generated/vector-crypto/api-testing/vaesem.c index b47a15900..dc67813e1 100644 --- a/auto-generated/vector-crypto/api-testing/vaesem.c +++ b/auto-generated/vector-crypto/api-testing/vaesem.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2(vd, vs2, vl); } 
-vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vaesz.c b/auto-generated/vector-crypto/api-testing/vaesz.c index cc4349b45..e5137944a 100644 --- a/auto-generated/vector-crypto/api-testing/vaesz.c +++ b/auto-generated/vector-crypto/api-testing/vaesz.c @@ -4,63 +4,63 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1(vd, vs2, vl); } 
-vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/api-testing/vsm4r.c b/auto-generated/vector-crypto/api-testing/vsm4r.c index 7c5ff7a51..d690e5618 100644 --- a/auto-generated/vector-crypto/api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/api-testing/vsm4r.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return 
__riscv_vsm4r_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return 
__riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c index 4c9faed7b..83739ef45 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2(vd, vs2, vl); } 
-vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c index 9cff36983..5f239f166 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, 
vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c index 8c7ab8abf..326fae048 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32mf2(vd, vs2, 
vl); +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t 
vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c index d01b30f7c..9cdf5b35a 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vaesem_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c index aad378dba..8bc82d23d 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c @@ -5,63 +5,63 @@ #include -vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +vuint32m4_t 
test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c index a37d743e7..d44ca8d2d 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return 
__riscv_vsm4r_vs_u32mf2(vd, vs2, vl); +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8(vd, vs2, vl); } vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8(vd, vs2, vl); } vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8(vd, vs2, vl); } vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - 
return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_u32m8(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c index 7126fd3d3..e019f6c4f 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t 
test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c index 6754c6e31..58f43cc58 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { 
+vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c index 076dadfed..e9f9544e9 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t 
vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c index cd8da8835..1a9281fe7 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t 
test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c index f459e124a..aa779b172 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c @@ -5,63 +5,63 @@ #include -vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t 
vs2, size_t vl) { +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c index 7a1d28756..e5e8d9c43 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, 
vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } diff --git 
a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c index a0bf2bc63..9807668e4 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, 
vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c index bb7253273..d9cd8ced8 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return 
__riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c index df69a7db4..96380b425 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, 
size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c index 89631199e..4539af8cd 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, 
vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c index 92f09192f..cd9069a7e 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c @@ -4,63 +4,63 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, 
vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c index 95bd0716a..66735b96c 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t 
test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c index 2eefcbc01..1bc0fc4a4 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return 
__riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t 
test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c index 97ab441f7..fa4536189 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t 
test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c index 2bcdbc400..d499b8720 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t 
test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { 
- return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c index 0f179040e..345b93db5 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - 
return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c index 4548ca4e0..57a9822f3 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c @@ -4,63 +4,63 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t 
test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c index e12f9028d..996ca813c 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c +++ 
b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c @@ -8,79 +8,79 @@ vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t 
test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c index 7b77ff31d..3c8991f42 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t 
test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c index b7f84c4b7..51c81225a 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t 
test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, 
vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c index c1debf192..fe895ad8b 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t 
vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c index e5dc8630c..6622e1f43 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t 
vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4_tu(vd, 
vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c index 8d48a65f2..1c3ca8a3b 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c @@ -5,63 +5,63 @@ #include -vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4_tu(vd, 
vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c index 245a6ae12..52be1b25d 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c @@ -9,79 +9,79 @@ vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32mf2_tu(vd, vs2, vl); +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t 
test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8_tu(vd, vs2, vl); } vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8_tu(vd, vs2, vl); } vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8_tu(vd, vs2, vl); } vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8_tu(vd, vs2, vl); } vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_u32m8_tu(vd, vs2, vl); } diff 
--git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c index 765042dee..6347f9767 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m4_t 
test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c index 0b7cc7547..002879bbd 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m2_t 
test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c index 236015c96..bb6ea6568 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t 
test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c index 866bf2914..ec2d40643 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c @@ -9,23 +9,23 @@ vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t 
test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } @@ -81,7 +81,7 @@ vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c index e9cd85400..0f3500c55 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c @@ -5,63 +5,63 @@ #include -vuint32mf2_t 
test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c index f8612d784..598ea9f47 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c @@ -9,23 +9,23 @@ vuint32mf2_t 
test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -33,19 +33,19 @@ vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -53,15 +53,15 @@ vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -69,11 +69,11 @@ vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -81,7 
+81,7 @@ vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c index 51819499f..7e2d582b1 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t 
test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c index ae17f9b58..191885804 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t 
test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c index a46ede689..2230a962c 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } @@ -32,19 +32,19 @@ 
vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c index a0930f52b..f0fff627e 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t 
test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } diff --git 
a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c index 40c8e9cb3..b4364cdd2 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c @@ -4,63 +4,63 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, 
size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c index abf418bb9..bf509ae52 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c @@ -8,23 +8,23 @@ vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -32,19 +32,19 @@ vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -52,15 +52,15 @@ vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -68,11 +68,11 @@ vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } 
-vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -80,7 +80,7 @@ vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } From abea467c1fbf8207b4ebd95b64c936f3669c7e8f Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 3 Aug 2023 00:17:55 -0700 Subject: [PATCH 36/44] [vector-crypto] Add llvm test case header for vector crypto extensions Signed-off-by: eop Chen --- .../rvv_intrinsic_gen/generator.py | 33 +++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py index 4c1d4e117..0c0bf5669 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py @@ -456,7 +456,7 @@ def inst_group_prologue(self): def inst_group_epilogue(self): return "" - def write_file_header(self, has_float_type, has_bfloat16_type): + def write_file_header(self, has_float_type, has_bfloat16_type, name): #pylint: disable=line-too-long int_llvm_header = r"""// REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ @@ -485,9 +485,38 @@ def write_file_header(self, has_float_type, has_bfloat16_type): r""" -Wno-psabi -O3 -fno-schedule-insns -fno-schedule-insns2" } */ """) + + vector_crypto_llvm_header = (r"""// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +""") + + def is_vector_crypto_inst(name): + vector_crypto_inst = [ + "vandn", "vbrev", "vbrev8", "vrev8", "vclz", "vctz", "vrol", "vror", + "vwsll", "vclmul", "vclmulh", "vghsh", "vgmul", "vaesef", "vaesem", + "vaesdf", "vaesdm", "vaeskf1", "vaeskf2", "vaesz", "vsha2ms", + "vsha2ch", "vsha2cl", "vsm4k", "vsm4r", "vsm3me", "vsm3c" + ] + for inst in vector_crypto_inst: + if inst in name: + return True + return False + if self.toolchain_type == ToolChainType.LLVM: if has_bfloat16_type: self.fd.write(bfloat16_llvm_header) + elif is_vector_crypto_inst(name): + self.fd.write(vector_crypto_llvm_header) elif has_float_type: self.fd.write(float_llvm_header) else: @@ -568,7 +597,7 @@ def func(self, inst_info, name, return_type, **kwargs): has_float_type = True if header: - self.write_file_header(has_float_type, has_bfloat16_type) + self.write_file_header(has_float_type, has_bfloat16_type, name) def output_call_arg(arg_name, type_name): if ((name.startswith("vget") or 
name.startswith("vset")) \ From 2d0fd387c64be58ff2b37343597a740ce1a35be9 Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 3 Aug 2023 01:01:07 -0700 Subject: [PATCH 37/44] [Auto-gen] Update tests under ../auto-generated/vector-crypto. (make git-commit-autogen-test) --- auto-generated/vector-crypto/llvm-api-tests/vaesdf.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vaesdm.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vaesef.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vaesem.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vaesz.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vandn.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vbrev.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vbrev8.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vclmul.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vclmulh.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vclz.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vctz.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vghsh.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vgmul.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vrev8.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vrol.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vror.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vsm3c.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vsm3me.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vsm4k.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vsm4r.c | 9 ++++++++- auto-generated/vector-crypto/llvm-api-tests/vwsll.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vaesdf.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vaesdm.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vaesef.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vaesem.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vaeskf1.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vaeskf2.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vaesz.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vandn.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vbrev.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vbrev8.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vclmul.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vclmulh.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vclz.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vctz.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vghsh.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vgmul.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vrev8.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vrol.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vror.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vsha2ch.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vsha2cl.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vsha2ms.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vsm3c.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vsm3me.c 
| 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vsm4k.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vsm4r.c | 9 ++++++++- .../vector-crypto/llvm-overloaded-tests/vwsll.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vaesef.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vaesem.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vaesz.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vandn.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vbrev.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vclmul.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vghsh.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vgmul.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vrev8.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vrol.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vror.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c | 9 ++++++++- .../vector-crypto/policy_funcs/llvm-api-tests/vwsll.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vaesdf.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vaesdm.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vaesef.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vaesem.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vaeskf1.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vaeskf2.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vaesz.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vandn.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vbrev.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vbrev8.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vclmul.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vclmulh.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vghsh.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vgmul.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vrev8.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vrol.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vror.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vsha2ch.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vsha2cl.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vsha2ms.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vsm3c.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vsm3me.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vsm4k.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vsm4r.c | 9 ++++++++- .../policy_funcs/llvm-overloaded-tests/vwsll.c | 9 ++++++++- 104 files changed, 832 insertions(+), 104 deletions(-) diff --git 
a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c index 83739ef45..04a638391 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c index 5f239f166..ba0c355d0 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c index 326fae048..0d1e8d720 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c index 9cdf5b35a..79d397e54 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature 
+experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c index f35c4a3b2..3b9857d2c 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c index fee669e56..fbb874289 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c index 8bc82d23d..d022831f1 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/llvm-api-tests/vandn.c index ac15e471b..f26790e7f 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vandn.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vandn.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: 
-target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c index 26c4de404..aa1f7a0e2 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vbrev.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c index d22110c4f..2ac7b751b 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c index a56321bd7..3751cde48 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vclmul.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c index 0772acf6d..5d0417a59 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/llvm-api-tests/vclz.c index 9ce26f56f..80e369c76 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vclz.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vclz.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/llvm-api-tests/vctz.c index 504efd27a..74863e79c 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vctz.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vctz.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c index 71dcf52e5..436349fb9 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vghsh.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c index a39f3c8c0..502aae3f8 100644 --- 
a/auto-generated/vector-crypto/llvm-api-tests/vgmul.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c index f5d49ee05..d02393633 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vrev8.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/llvm-api-tests/vrol.c index 1154de852..d02ca2e49 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vrol.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vrol.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vror.c b/auto-generated/vector-crypto/llvm-api-tests/vror.c index 694b6e0e0..d800a671e 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vror.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vror.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c index 046495c35..b0a9e0220 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c index 442946790..ab5430e22 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c index 76cf625eb..0d65884e1 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c index 87af91e5c..c3589f4af 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature 
+experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c index ce4673c23..a286c7c26 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c index 0911bd722..33d5ab701 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c index d44ca8d2d..59aece912 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c index 70212a2c4..d7b1d42bb 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature 
+experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c index e019f6c4f..44803c47c 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c index 58f43cc58..bcb5a0b36 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c index e9f9544e9..2768f4a6b 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c index 1a9281fe7..1cbdce0bc 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c index e15daf77d..f1048cd5a 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c index 40dee84d2..3387d50cf 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c index aa779b172..93e2e3eef 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature 
+experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c index e2894d7e4..7a0f664b0 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c index 0c95750c7..579baaf6d 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c index d94465fe5..980e7f4ae 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c index 36cdfb21e..b8143f3f0 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// 
RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c index f5343fa97..a17c3752e 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c index f1da0ff12..fb2c59218 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c index 2dc00bb3f..cfaa556ee 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c index 5940884a9..afbd4dbc8 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c index 4d254ff6c..a091c69a7 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c index 72738a4c7..3e29204b8 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c index 51fab3b0c..1007fbea4 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh 
-disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c index f5439c7ab..5221bd3aa 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c index 63d6c5aea..4c3c170db 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c index c16a3b774..ab349951b 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c index c795ac036..4f3c31a0d 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: 
-target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c index 416f7a64f..115cbf5f7 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c index 1c1ad44b4..3a37cbefc 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c index 319a815f8..5ae9fdf14 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c index e5e8d9c43..079db5b1f 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c index ab2309ab1..eb6e5858f 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c index 3c8991f42..7bf1af06b 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c index 51c81225a..856cc6350 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature 
+experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c index fe895ad8b..a7ee09719 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c index 6622e1f43..3b398d5cc 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c index 36bc372bc..3a70a8170 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c index 16b8eaa79..583d8cf43 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// 
RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c index 1c3ca8a3b..d5e3ba40f 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c index cdd2befb5..e9f99c85f 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c index fd694cc5a..fb68a2faf 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c index 1f0433554..ad555f360 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c index be7944419..0f6ab5547 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c index 053782475..0c9384bef 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c index e8271d882..a503594d6 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: 
-target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c index 9f725f34a..aec4008fb 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c index 69737f009..552b08f8e 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c index 088f1363a..cea862e0f 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c index 
b7ea078c6..62ced63e1 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c index 42785c045..7b0921a22 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c index d3aa58e49..8920a97a6 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c index 1641cffed..9e7df01ff 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// 
RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c index e64ed6ab7..0cda20a97 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c index f161c3a7c..a3687efb9 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c index a702fee72..9c03f0061 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c index 52be1b25d..749e2f687 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c @@ -1,5 
+1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c index f9415d873..390decdcb 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c index 6347f9767..33cff9e27 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c index 002879bbd..eb5e53de7 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh 
-disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c index bb6ea6568..00327588f 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c index ec2d40643..fc86d2d5c 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c index 32fb898ca..8ffb3eaf4 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c index 9526a18de..3da580d32 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c @@ -1,5 +1,12 
@@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c index 0f3500c55..352ea15d6 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c index 298fa71e9..de8cd112d 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c index c0a7edfac..f8c14b057 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature 
+experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c index fda1416d8..83967d94f 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c index 15bc5f9df..e20f8bff1 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c index cdcb58c88..87cb1377d 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c index 4c246ad78..526c3a33d 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c @@ 
-1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c index 5bad9f0f6..c5ba6e721 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c index 264f15a6b..a723aa6de 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c index a34a5be23..aa8ba847d 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature 
+experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c index 5a7dad772..e3545be3c 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c index d83ec593f..d7e8bf814 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c index 7f9c2327b..0dc6ff651 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c index 6648d4381..b2193b03d 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c +++ 
b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c index 9cbfd29cf..f32bff343 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c index 639a153fc..657a2aed2 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c index bd902bfba..f241b53f5 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature 
+experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c index 598ea9f47..8d08ad373 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c index dbe38e613..c6772c8c7 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c @@ -1,5 +1,12 @@ // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +experimental-zvbb \ +// RUN: -target-feature +experimental-zvbc \ +// RUN: -target-feature +experimental-zvkg \ +// RUN: -target-feature +experimental-zvkned \ +// RUN: -target-feature +experimental-zvknhb \ +// RUN: -target-feature +experimental-zvksed \ +// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s From 856dda7e819e9e9cedb489373eea95bf80b54e0b Mon Sep 17 00:00:00 2001 From: Kito Cheng Date: Wed, 10 Apr 2024 15:33:16 +0800 Subject: [PATCH 38/44] Update auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md Co-authored-by: Nicolas Brunie <82109999+nibrunieAtSi5@users.noreply.github.com> Signed-off-by: Kito Cheng --- .../00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md index 1778ca313..26b6260a4 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md @@ -235,7 +235,7 @@ vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl) vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); ``` -### [Vector Bit-manipulation used in Cryptography - Count Bits](): +### [Vector Basic Bit-manipulation - Count Bits](): **Prototypes:** ``` 
C

From b4019d314622d7b7c705d8289ee6dae74a00d4a1 Mon Sep 17 00:00:00 2001
From: Brandon Wu
Date: Wed, 24 Apr 2024 07:52:41 -0700
Subject: [PATCH 39/44] Modify descriptions in vector_crypto_inst.py for correctness

---
 .../rvv_intrinsic_gen/vector_crypto_inst.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py
index d02482f37..210209181 100644
--- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py
+++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py
@@ -24,7 +24,7 @@ def gen(g):
 
   g.function_group(
       vector_crypto_template,
-      "Vector Bit-manipulation used in Cryptography - Reverse Bits",
+      "Vector Basic Bit-manipulation - Reverse Bits in Elements",
       "", # FIXME: We probably have a separate document for vector-crypto
       ["vbrev", "vbrev8", "vrev8"],
       UITYPE,
@@ -34,7 +34,7 @@ def gen(g):
 
   g.function_group(
       vector_crypto_template,
-      "Vector Bit-manipulation used in Cryptography - Count Bits",
+      "Vector Basic Bit-manipulation - Count Bits",
       "", # FIXME: We probably have a separate document for vector-crypto
       ["vclz", "vctz"],
       UITYPE,
@@ -54,7 +54,7 @@ def gen(g):
 
   g.function_group(
       vector_crypto_template,
-      "Vector Bit-manipulation used in Cryptography - Shift",
+      "Vector Basic Bit-manipulation used - Widening Shift",
       "", # FIXME: We probably have a separate document for vector-crypto
       ["vwsll"],
       UITYPE,
@@ -198,7 +198,7 @@ def gen(g):
 
   g.function_group(
       vector_crypto_template,
-      "Vector SM3 Message Expansion",
+      "Vector SM3 Compression",
       "", # FIXME: We probably have a separate document for vector-crypto
       ["vsm3c"],
       UITYPE,

From 45d63eb0ca2e488dfe78836d6e15794bd18ab43b Mon Sep 17 00:00:00 2001
From: Brandon Wu
Date: Wed, 24 Apr 2024 09:31:33 -0700
Subject: [PATCH 40/44] Filter out LMUL=8 cases for .vs instructions

---
 .../rvv_intrinsic_gen/templates/vector_crypto_template.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py
index 54f34edf4..28b5a466a 100644
--- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py
+++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py
@@ -152,6 +152,9 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list):
         starting_from_lmul_index = lmul_list.index(args["LMUL"])
         # print(starting_from_lmul_index)
         for i in range(starting_from_lmul_index, len(lmul_list)):
+          if args["LMUL"] == 8:
+            continue
+
           kwargs["return_type"] =\
             f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t"
           kwargs["vd"] = f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t"

From dc973eef76556c1539cced878054042a417bf549 Mon Sep 17 00:00:00 2001
From: Brandon Wu
Date: Wed, 24 Apr 2024 09:32:10 -0700
Subject: [PATCH 41/44] Remove experimental for target-feature

---
 .../rvv_intrinsic_gen/generator.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py
index 0c0bf5669..c2e27f798 100644
--- a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py
+++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py
@@ -488,13 +488,13 @@ def write_file_header(self, has_float_type, has_bfloat16_type, name):
 
    vector_crypto_llvm_header = (r"""// REQUIRES: riscv-registered-target
// RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s From 0632c1f95d6ac4c8ce0ec1cc7f228491ac5a8fe8 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Wed, 24 Apr 2024 09:34:46 -0700 Subject: [PATCH 42/44] Regenerate test cases and docs for vector crypto --- .../vector-crypto/api-testing/vaesdf.c | 8 - .../vector-crypto/api-testing/vaesdm.c | 8 - .../vector-crypto/api-testing/vaesef.c | 8 - .../vector-crypto/api-testing/vaesem.c | 8 - .../vector-crypto/api-testing/vaeskf1.c | 4 - .../vector-crypto/api-testing/vaeskf2.c | 4 - .../vector-crypto/api-testing/vaesz.c | 8 - .../vector-crypto/api-testing/vandn.c | 180 +- .../vector-crypto/api-testing/vbrev.c | 92 +- .../vector-crypto/api-testing/vbrev8.c | 92 +- .../vector-crypto/api-testing/vclmul.c | 36 +- .../vector-crypto/api-testing/vclmulh.c | 36 +- .../vector-crypto/api-testing/vclz.c | 92 +- .../vector-crypto/api-testing/vctz.c | 92 +- .../vector-crypto/api-testing/vghsh.c | 4 - .../vector-crypto/api-testing/vgmul.c | 4 - .../vector-crypto/api-testing/vrev8.c | 92 +- .../vector-crypto/api-testing/vrol.c | 180 +- .../vector-crypto/api-testing/vror.c | 180 +- .../vector-crypto/api-testing/vsha2ch.c | 4 - .../vector-crypto/api-testing/vsha2cl.c | 4 - .../vector-crypto/api-testing/vsha2ms.c | 4 - .../vector-crypto/api-testing/vsm3c.c | 4 - .../vector-crypto/api-testing/vsm3me.c | 4 - .../vector-crypto/api-testing/vsm4k.c | 4 - .../vector-crypto/api-testing/vsm4r.c | 8 - .../vector-crypto/api-testing/vwsll.c | 124 +- .../vector-crypto/intrinsic_funcs.md | 777 +++--- ...bit-manipulation_used_in_cryptography.adoc | 586 +++++ ...r_bit-manipulation_used_in_cryptography.md | 581 ----- ...vbc_-_vector_carryless_multiplication.adoc | 42 + ..._zvbc_-_vector_carryless_multiplication.md | 41 - ...gmac.md => 02_zvkg_-_vector_gcm_gmac.adoc} | 11 +- ..._nist_suite:_vector_aes_block_cipher.adoc} | 43 +- ...nist_suite:_vector_sha-2_secure_hash.adoc} | 20 +- ...ed_-_shangmi_suite:_sm4_block_cipher.adoc} | 21 +- ...ksh_-_shangmi_suite:_sm3_secure_hash.adoc} | 20 +- .../vector-crypto/llvm-api-tests/vaesdf.c | 64 +- .../vector-crypto/llvm-api-tests/vaesdm.c | 64 +- .../vector-crypto/llvm-api-tests/vaesef.c | 64 +- .../vector-crypto/llvm-api-tests/vaesem.c | 64 +- .../vector-crypto/llvm-api-tests/vaeskf1.c | 15 +- .../vector-crypto/llvm-api-tests/vaeskf2.c | 18 +- .../vector-crypto/llvm-api-tests/vaesz.c | 61 +- .../vector-crypto/llvm-api-tests/vandn.c | 244 +- .../vector-crypto/llvm-api-tests/vbrev.c | 103 +- .../vector-crypto/llvm-api-tests/vbrev8.c | 103 +- .../vector-crypto/llvm-api-tests/vclmul.c | 55 +- .../vector-crypto/llvm-api-tests/vclmulh.c | 55 +- .../vector-crypto/llvm-api-tests/vclz.c | 103 +- .../vector-crypto/llvm-api-tests/vctz.c | 103 +- .../vector-crypto/llvm-api-tests/vghsh.c | 30 +- .../vector-crypto/llvm-api-tests/vgmul.c | 18 +- 
.../vector-crypto/llvm-api-tests/vrev8.c | 103 +- .../vector-crypto/llvm-api-tests/vrol.c | 244 +- .../vector-crypto/llvm-api-tests/vror.c | 244 +- .../vector-crypto/llvm-api-tests/vsha2ch.c | 42 +- .../vector-crypto/llvm-api-tests/vsha2cl.c | 42 +- .../vector-crypto/llvm-api-tests/vsha2ms.c | 42 +- .../vector-crypto/llvm-api-tests/vsm3c.c | 18 +- .../vector-crypto/llvm-api-tests/vsm3me.c | 18 +- .../vector-crypto/llvm-api-tests/vsm4k.c | 15 +- .../vector-crypto/llvm-api-tests/vsm4r.c | 64 +- .../vector-crypto/llvm-api-tests/vwsll.c | 168 +- .../llvm-overloaded-tests/vaesdf.c | 64 +- .../llvm-overloaded-tests/vaesdm.c | 64 +- .../llvm-overloaded-tests/vaesef.c | 64 +- .../llvm-overloaded-tests/vaesem.c | 64 +- .../llvm-overloaded-tests/vaeskf1.c | 15 +- .../llvm-overloaded-tests/vaeskf2.c | 18 +- .../llvm-overloaded-tests/vaesz.c | 61 +- .../llvm-overloaded-tests/vandn.c | 244 +- .../llvm-overloaded-tests/vbrev.c | 103 +- .../llvm-overloaded-tests/vbrev8.c | 103 +- .../llvm-overloaded-tests/vclmul.c | 55 +- .../llvm-overloaded-tests/vclmulh.c | 55 +- .../llvm-overloaded-tests/vclz.c | 103 +- .../llvm-overloaded-tests/vctz.c | 103 +- .../llvm-overloaded-tests/vghsh.c | 30 +- .../llvm-overloaded-tests/vgmul.c | 18 +- .../llvm-overloaded-tests/vrev8.c | 103 +- .../llvm-overloaded-tests/vrol.c | 244 +- .../llvm-overloaded-tests/vror.c | 244 +- .../llvm-overloaded-tests/vsha2ch.c | 42 +- .../llvm-overloaded-tests/vsha2cl.c | 42 +- .../llvm-overloaded-tests/vsha2ms.c | 42 +- .../llvm-overloaded-tests/vsm3c.c | 18 +- .../llvm-overloaded-tests/vsm3me.c | 18 +- .../llvm-overloaded-tests/vsm4k.c | 15 +- .../llvm-overloaded-tests/vsm4r.c | 64 +- .../llvm-overloaded-tests/vwsll.c | 168 +- .../overloaded-api-testing/vaesdf.c | 8 - .../overloaded-api-testing/vaesdm.c | 8 - .../overloaded-api-testing/vaesef.c | 8 - .../overloaded-api-testing/vaesem.c | 8 - .../overloaded-api-testing/vaeskf1.c | 4 - .../overloaded-api-testing/vaeskf2.c | 4 - .../overloaded-api-testing/vaesz.c | 8 - .../overloaded-api-testing/vandn.c | 180 +- .../overloaded-api-testing/vbrev.c | 92 +- .../overloaded-api-testing/vbrev8.c | 92 +- .../overloaded-api-testing/vclmul.c | 36 +- .../overloaded-api-testing/vclmulh.c | 36 +- .../overloaded-api-testing/vclz.c | 92 +- .../overloaded-api-testing/vctz.c | 92 +- .../overloaded-api-testing/vghsh.c | 4 - .../overloaded-api-testing/vgmul.c | 4 - .../overloaded-api-testing/vrev8.c | 92 +- .../overloaded-api-testing/vrol.c | 180 +- .../overloaded-api-testing/vror.c | 180 +- .../overloaded-api-testing/vsha2ch.c | 4 - .../overloaded-api-testing/vsha2cl.c | 4 - .../overloaded-api-testing/vsha2ms.c | 4 - .../overloaded-api-testing/vsm3c.c | 4 - .../overloaded-api-testing/vsm3me.c | 4 - .../overloaded-api-testing/vsm4k.c | 4 - .../overloaded-api-testing/vsm4r.c | 8 - .../overloaded-api-testing/vwsll.c | 124 +- .../overloaded_intrinsic_funcs.md | 777 +++--- ...bit-manipulation_used_in_cryptography.adoc | 586 +++++ ...r_bit-manipulation_used_in_cryptography.md | 581 ----- ...vbc_-_vector_carryless_multiplication.adoc | 42 + ..._zvbc_-_vector_carryless_multiplication.md | 41 - ...gmac.md => 02_zvkg_-_vector_gcm_gmac.adoc} | 11 +- ..._nist_suite:_vector_aes_block_cipher.adoc} | 43 +- ...nist_suite:_vector_sha-2_secure_hash.adoc} | 20 +- ...ed_-_shangmi_suite:_sm4_block_cipher.adoc} | 21 +- ...ksh_-_shangmi_suite:_sm3_secure_hash.adoc} | 20 +- .../policy_funcs/api-testing/vaesdf.c | 8 - .../policy_funcs/api-testing/vaesdm.c | 8 - .../policy_funcs/api-testing/vaesef.c | 8 - 
.../policy_funcs/api-testing/vaesem.c | 8 - .../policy_funcs/api-testing/vaeskf1.c | 24 +- .../policy_funcs/api-testing/vaeskf2.c | 4 - .../policy_funcs/api-testing/vaesz.c | 8 - .../policy_funcs/api-testing/vandn.c | 708 +++--- .../policy_funcs/api-testing/vbrev.c | 356 ++- .../policy_funcs/api-testing/vbrev8.c | 356 ++- .../policy_funcs/api-testing/vclmul.c | 132 +- .../policy_funcs/api-testing/vclmulh.c | 132 +- .../policy_funcs/api-testing/vghsh.c | 4 - .../policy_funcs/api-testing/vgmul.c | 4 - .../policy_funcs/api-testing/vrev8.c | 356 ++- .../policy_funcs/api-testing/vrol.c | 708 +++--- .../policy_funcs/api-testing/vror.c | 708 +++--- .../policy_funcs/api-testing/vsha2ch.c | 4 - .../policy_funcs/api-testing/vsha2cl.c | 4 - .../policy_funcs/api-testing/vsha2ms.c | 4 - .../policy_funcs/api-testing/vsm3c.c | 4 - .../policy_funcs/api-testing/vsm3me.c | 24 +- .../policy_funcs/api-testing/vsm4k.c | 24 +- .../policy_funcs/api-testing/vsm4r.c | 8 - .../policy_funcs/api-testing/vwsll.c | 484 ++-- .../policy_funcs/intrinsic_funcs.md | 2151 +++++++++-------- ...bit-manipulation_used_in_cryptography.adoc | 958 ++++++++ ...r_bit-manipulation_used_in_cryptography.md | 953 -------- ...vbc_-_vector_carryless_multiplication.adoc | 76 + ..._zvbc_-_vector_carryless_multiplication.md | 75 - ...gmac.md => 02_zvkg_-_vector_gcm_gmac.adoc} | 11 +- ..._nist_suite:_vector_aes_block_cipher.adoc} | 53 +- ...nist_suite:_vector_sha-2_secure_hash.adoc} | 20 +- ...ed_-_shangmi_suite:_sm4_block_cipher.adoc} | 31 +- ...vksh_-_shangmi_suite:_sm3_secure_hash.adoc | 26 + ..._zvksh_-_shangmi_suite:_sm3_secure_hash.md | 24 - .../policy_funcs/llvm-api-tests/vaesdf.c | 19 +- .../policy_funcs/llvm-api-tests/vaesdm.c | 19 +- .../policy_funcs/llvm-api-tests/vaesef.c | 19 +- .../policy_funcs/llvm-api-tests/vaesem.c | 19 +- .../policy_funcs/llvm-api-tests/vaeskf1.c | 35 +- .../policy_funcs/llvm-api-tests/vaeskf2.c | 15 +- .../policy_funcs/llvm-api-tests/vaesz.c | 19 +- .../policy_funcs/llvm-api-tests/vandn.c | 719 +++--- .../policy_funcs/llvm-api-tests/vbrev.c | 367 ++- .../policy_funcs/llvm-api-tests/vbrev8.c | 367 ++- .../policy_funcs/llvm-api-tests/vclmul.c | 143 +- .../policy_funcs/llvm-api-tests/vclmulh.c | 143 +- .../policy_funcs/llvm-api-tests/vghsh.c | 15 +- .../policy_funcs/llvm-api-tests/vgmul.c | 15 +- .../policy_funcs/llvm-api-tests/vrev8.c | 367 ++- .../policy_funcs/llvm-api-tests/vrol.c | 719 +++--- .../policy_funcs/llvm-api-tests/vror.c | 719 +++--- .../policy_funcs/llvm-api-tests/vsha2ch.c | 15 +- .../policy_funcs/llvm-api-tests/vsha2cl.c | 15 +- .../policy_funcs/llvm-api-tests/vsha2ms.c | 15 +- .../policy_funcs/llvm-api-tests/vsm3c.c | 15 +- .../policy_funcs/llvm-api-tests/vsm3me.c | 35 +- .../policy_funcs/llvm-api-tests/vsm4k.c | 35 +- .../policy_funcs/llvm-api-tests/vsm4r.c | 19 +- .../policy_funcs/llvm-api-tests/vwsll.c | 495 ++-- .../llvm-overloaded-tests/vaesdf.c | 76 +- .../llvm-overloaded-tests/vaesdm.c | 76 +- .../llvm-overloaded-tests/vaesef.c | 76 +- .../llvm-overloaded-tests/vaesem.c | 76 +- .../llvm-overloaded-tests/vaeskf1.c | 40 +- .../llvm-overloaded-tests/vaeskf2.c | 30 +- .../llvm-overloaded-tests/vaesz.c | 61 +- .../llvm-overloaded-tests/vandn.c | 952 +++++--- .../llvm-overloaded-tests/vbrev.c | 436 ++-- .../llvm-overloaded-tests/vbrev8.c | 436 ++-- .../llvm-overloaded-tests/vclmul.c | 191 +- .../llvm-overloaded-tests/vclmulh.c | 195 +- .../llvm-overloaded-tests/vghsh.c | 30 +- .../llvm-overloaded-tests/vgmul.c | 18 +- .../llvm-overloaded-tests/vrev8.c | 436 ++-- 
.../policy_funcs/llvm-overloaded-tests/vrol.c | 928 ++++--- .../policy_funcs/llvm-overloaded-tests/vror.c | 928 ++++--- .../llvm-overloaded-tests/vsha2ch.c | 42 +- .../llvm-overloaded-tests/vsha2cl.c | 42 +- .../llvm-overloaded-tests/vsha2ms.c | 42 +- .../llvm-overloaded-tests/vsm3c.c | 18 +- .../llvm-overloaded-tests/vsm3me.c | 40 +- .../llvm-overloaded-tests/vsm4k.c | 36 +- .../llvm-overloaded-tests/vsm4r.c | 64 +- .../llvm-overloaded-tests/vwsll.c | 652 +++-- .../overloaded-api-testing/vaesdf.c | 67 +- .../overloaded-api-testing/vaesdm.c | 67 +- .../overloaded-api-testing/vaesef.c | 67 +- .../overloaded-api-testing/vaesem.c | 67 +- .../overloaded-api-testing/vaeskf1.c | 31 +- .../overloaded-api-testing/vaeskf2.c | 21 +- .../overloaded-api-testing/vaesz.c | 52 +- .../overloaded-api-testing/vandn.c | 943 +++++--- .../overloaded-api-testing/vbrev.c | 427 ++-- .../overloaded-api-testing/vbrev8.c | 427 ++-- .../overloaded-api-testing/vclmul.c | 182 +- .../overloaded-api-testing/vclmulh.c | 186 +- .../overloaded-api-testing/vghsh.c | 21 +- .../overloaded-api-testing/vgmul.c | 9 +- .../overloaded-api-testing/vrev8.c | 427 ++-- .../overloaded-api-testing/vrol.c | 919 ++++--- .../overloaded-api-testing/vror.c | 919 ++++--- .../overloaded-api-testing/vsha2ch.c | 33 +- .../overloaded-api-testing/vsha2cl.c | 33 +- .../overloaded-api-testing/vsha2ms.c | 33 +- .../overloaded-api-testing/vsm3c.c | 9 +- .../overloaded-api-testing/vsm3me.c | 31 +- .../overloaded-api-testing/vsm4k.c | 27 +- .../overloaded-api-testing/vsm4r.c | 55 +- .../overloaded-api-testing/vwsll.c | 643 +++-- .../overloaded_intrinsic_funcs.md | 2151 +++++++++-------- ...bit-manipulation_used_in_cryptography.adoc | 958 ++++++++ ...r_bit-manipulation_used_in_cryptography.md | 953 -------- ...vbc_-_vector_carryless_multiplication.adoc | 76 + ..._zvbc_-_vector_carryless_multiplication.md | 75 - ...gmac.md => 02_zvkg_-_vector_gcm_gmac.adoc} | 11 +- ..._nist_suite:_vector_aes_block_cipher.adoc} | 53 +- ...nist_suite:_vector_sha-2_secure_hash.adoc} | 20 +- ...ed_-_shangmi_suite:_sm4_block_cipher.adoc} | 31 +- ...vksh_-_shangmi_suite:_sm3_secure_hash.adoc | 26 + ..._zvksh_-_shangmi_suite:_sm3_secure_hash.md | 24 - 250 files changed, 21273 insertions(+), 18731 deletions(-) create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc delete mode 100644 auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md create mode 100644 auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc delete mode 100644 auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md rename auto-generated/vector-crypto/intrinsic_funcs/{02_zvkg_-_vector_gcm_gmac.md => 02_zvkg_-_vector_gcm_gmac.adoc} (92%) rename auto-generated/vector-crypto/intrinsic_funcs/{03_zvkned_-_nist_suite:_vector_aes_block_cipher.md => 03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc} (92%) rename auto-generated/vector-crypto/intrinsic_funcs/{04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md => 04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc} (93%) rename auto-generated/vector-crypto/intrinsic_funcs/{05_zvksed_-_shangmi_suite:_sm4_block_cipher.md => 05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc} (89%) rename auto-generated/vector-crypto/intrinsic_funcs/{06_zvksh_-_shangmi_suite:_sm3_secure_hash.md => 06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc} (84%) create mode 100644 
auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc delete mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md create mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc delete mode 100644 auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md rename auto-generated/vector-crypto/overloaded_intrinsic_funcs/{02_zvkg_-_vector_gcm_gmac.md => 02_zvkg_-_vector_gcm_gmac.adoc} (91%) rename auto-generated/vector-crypto/overloaded_intrinsic_funcs/{03_zvkned_-_nist_suite:_vector_aes_block_cipher.md => 03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc} (91%) rename auto-generated/vector-crypto/overloaded_intrinsic_funcs/{04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md => 04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc} (92%) rename auto-generated/vector-crypto/overloaded_intrinsic_funcs/{05_zvksed_-_shangmi_suite:_sm4_block_cipher.md => 05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc} (88%) rename auto-generated/vector-crypto/overloaded_intrinsic_funcs/{06_zvksh_-_shangmi_suite:_sm3_secure_hash.md => 06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc} (82%) create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc delete mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc delete mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md rename auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/{02_zvkg_-_vector_gcm_gmac.md => 02_zvkg_-_vector_gcm_gmac.adoc} (91%) rename auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/{03_zvkned_-_nist_suite:_vector_aes_block_cipher.md => 03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc} (87%) rename auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/{04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md => 04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc} (93%) rename auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/{05_zvksed_-_shangmi_suite:_sm4_block_cipher.md => 05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc} (68%) create mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc delete mode 100644 auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc delete mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc delete mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md rename auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/{02_zvkg_-_vector_gcm_gmac.md => 02_zvkg_-_vector_gcm_gmac.adoc} (90%) rename 
auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/{03_zvkned_-_nist_suite:_vector_aes_block_cipher.md => 03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc} (86%) rename auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/{04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md => 04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc} (92%) rename auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/{05_zvksed_-_shangmi_suite:_sm4_block_cipher.md => 05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc} (67%) create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc delete mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md diff --git a/auto-generated/vector-crypto/api-testing/vaesdf.c b/auto-generated/vector-crypto/api-testing/vaesdf.c index fac9c44ee..e5b912a42 100644 --- a/auto-generated/vector-crypto/api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/api-testing/vaesdf.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t v vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/api-testing/vaesdm.c b/auto-generated/vector-crypto/api-testing/vaesdm.c index 17261e874..903beeddf 100644 --- a/auto-generated/vector-crypto/api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/api-testing/vaesdm.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t v vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/api-testing/vaesef.c b/auto-generated/vector-crypto/api-testing/vaesef.c index 683ac6669..375059d4d 100644 --- a/auto-generated/vector-crypto/api-testing/vaesef.c +++ b/auto-generated/vector-crypto/api-testing/vaesef.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t v vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/api-testing/vaesem.c 
b/auto-generated/vector-crypto/api-testing/vaesem.c index dc67813e1..76aa9d61b 100644 --- a/auto-generated/vector-crypto/api-testing/vaesem.c +++ b/auto-generated/vector-crypto/api-testing/vaesem.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t v vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/api-testing/vaeskf1.c b/auto-generated/vector-crypto/api-testing/vaeskf1.c index 0d55e93ac..a6f2fbd00 100644 --- a/auto-generated/vector-crypto/api-testing/vaeskf1.c +++ b/auto-generated/vector-crypto/api-testing/vaeskf1.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vaeskf2.c b/auto-generated/vector-crypto/api-testing/vaeskf2.c index 7509d6775..060b9874f 100644 --- a/auto-generated/vector-crypto/api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/api-testing/vaeskf2.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vaesz.c b/auto-generated/vector-crypto/api-testing/vaesz.c index e5137944a..f3c6760ce 100644 --- a/auto-generated/vector-crypto/api-testing/vaesz.c +++ b/auto-generated/vector-crypto/api-testing/vaesz.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl); } @@ -59,8 +56,3 @@ vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/api-testing/vandn.c b/auto-generated/vector-crypto/api-testing/vandn.c index 50ca46138..7400c8a58 100644 --- a/auto-generated/vector-crypto/api-testing/vandn.c +++ b/auto-generated/vector-crypto/api-testing/vandn.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t 
test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vandn_vv_u8mf8(vs2, vs1, vl); } @@ -180,179 +177,178 @@ vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vandn_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_m(mask, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_m(mask, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_m(mask, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_m(mask, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_m(mask, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_m(mask, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_m(mask, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_m(mask, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_m(mask, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_m(mask, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_m(mask, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t 
mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_m(mask, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m8_m(mask, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_m(mask, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_m(mask, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_m(mask, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_m(mask, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_m(mask, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_m(mask, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_m(mask, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_m(mask, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_m(mask, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_m(mask, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_m(vm, vs2, 
vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_m(mask, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_m(mask, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_m(mask, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_m(mask, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_m(mask, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_m(mask, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_m(mask, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_m(mask, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_m(mask, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_m(mask, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_m(mask, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_m(mask, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, 
vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_m(mask, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vbrev.c b/auto-generated/vector-crypto/api-testing/vbrev.c index 97d4855ac..fd22f6114 100644 --- a/auto-generated/vector-crypto/api-testing/vbrev.c +++ b/auto-generated/vector-crypto/api-testing/vbrev.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vbrev_v_u8mf8(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vbrev_v_u64m8(vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t 
test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_m(mask, vs2, vl); 
+vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vbrev8.c b/auto-generated/vector-crypto/api-testing/vbrev8.c index 323154304..6d29c2665 100644 --- a/auto-generated/vector-crypto/api-testing/vbrev8.c +++ b/auto-generated/vector-crypto/api-testing/vbrev8.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vbrev8_v_u8mf8(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vbrev8_v_u64m8(vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_m(vm, vs2, vl); } 
-vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, 
vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vclmul.c b/auto-generated/vector-crypto/api-testing/vclmul.c index 615da37c2..3fd21fa7f 100644 --- a/auto-generated/vector-crypto/api-testing/vclmul.c +++ b/auto-generated/vector-crypto/api-testing/vclmul.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { return __riscv_vclmul_vv_u64m1(vs2, vs1, vl); } @@ -36,35 +33,34 @@ vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vclmul_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vclmulh.c b/auto-generated/vector-crypto/api-testing/vclmulh.c index 37795dc1a..a4c69311e 100644 --- a/auto-generated/vector-crypto/api-testing/vclmulh.c +++ b/auto-generated/vector-crypto/api-testing/vclmulh.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl); } @@ -36,35 +33,34 @@ vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t 
rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vclz.c b/auto-generated/vector-crypto/api-testing/vclz.c index 655af1c63..1fa92a927 100644 --- a/auto-generated/vector-crypto/api-testing/vclz.c +++ b/auto-generated/vector-crypto/api-testing/vclz.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vclz_v_u8mf8(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vclz_v_u64m8(vs2, vl); } -vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vclz_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vclz_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vclz_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vclz_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vclz_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vclz_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vclz_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vclz_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vclz_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vclz_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vclz_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(vm, 
vs2, vl); } -vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vclz_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vclz_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vclz_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vclz_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vclz_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vclz_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vclz_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vclz_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vclz_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vclz_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vclz_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vctz.c b/auto-generated/vector-crypto/api-testing/vctz.c index 262e6be9b..eadb46e90 100644 --- a/auto-generated/vector-crypto/api-testing/vctz.c +++ b/auto-generated/vector-crypto/api-testing/vctz.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vctz_v_u8mf8(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vctz_v_u64m8(vs2, vl); } -vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vctz_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, 
vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vctz_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vctz_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vctz_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vctz_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vctz_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vctz_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vctz_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vctz_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vctz_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vctz_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vctz_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vctz_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vctz_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vctz_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vctz_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vctz_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vctz_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vctz_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vctz_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vctz_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vctz_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vctz_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vghsh.c b/auto-generated/vector-crypto/api-testing/vghsh.c index b93ebfa2f..accbf01e5 100644 --- a/auto-generated/vector-crypto/api-testing/vghsh.c +++ b/auto-generated/vector-crypto/api-testing/vghsh.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1 vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vgmul.c b/auto-generated/vector-crypto/api-testing/vgmul.c index 09521d4d0..4d9028a54 100644 --- a/auto-generated/vector-crypto/api-testing/vgmul.c +++ b/auto-generated/vector-crypto/api-testing/vgmul.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vgmul_vv_u32mf2(vd, vs2, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vgmul_vv_u32m8(vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vrev8.c b/auto-generated/vector-crypto/api-testing/vrev8.c index 9d2ea220c..c0b367a61 100644 --- a/auto-generated/vector-crypto/api-testing/vrev8.c +++ b/auto-generated/vector-crypto/api-testing/vrev8.c @@ 
-1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vrev8_v_u8mf8(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vrev8_v_u64m8(vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t 
vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vrol.c b/auto-generated/vector-crypto/api-testing/vrol.c index 41fdc7637..f4ee9ffbb 100644 --- a/auto-generated/vector-crypto/api-testing/vrol.c +++ b/auto-generated/vector-crypto/api-testing/vrol.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vrol_vv_u8mf8(vs2, vs1, vl); } @@ -180,179 +177,178 @@ vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vrol_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_m(mask, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_m(mask, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vrol_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_m(mask, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_m(mask, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_m(mask, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_m(mask, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_m(mask, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_m(mask, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_m(mask, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_m(mask, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_m(mask, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_m(mask, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_m(mask, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_m(mask, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t 
test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_m(mask, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_m(mask, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_m(mask, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_m(mask, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_m(mask, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_m(mask, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_m(mask, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_m(mask, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_m(mask, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_m(mask, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_m(mask, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_m(mask, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_m(vm, vs2, rs1, vl); } 
-vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_m(mask, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_m(mask, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_m(mask, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_m(mask, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_m(mask, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_m(mask, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_m(mask, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_m(mask, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_m(mask, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_m(mask, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_m(vm, vs2, rs1, vl); } 
-vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vror.c b/auto-generated/vector-crypto/api-testing/vror.c index c00b0b98e..9c8f32431 100644 --- a/auto-generated/vector-crypto/api-testing/vror.c +++ b/auto-generated/vector-crypto/api-testing/vror.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vror_vv_u8mf8(vs2, vs1, vl); } @@ -180,179 +177,178 @@ vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vror_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_m(mask, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_m(mask, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_m(mask, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_m(mask, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_m(mask, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_m(mask, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_m(mask, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_m(mask, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_m(mask, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_m(mask, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_m(mask, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_m(mask, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_m(mask, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_m(mask, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_m(mask, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_m(mask, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t 
test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_m(mask, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_m(mask, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_m(mask, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_m(mask, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_m(mask, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_m(mask, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_m(mask, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_m(mask, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_m(mask, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_m(mask, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_m(mask, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_m(mask, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_m(vm, vs2, rs1, vl); } 
-vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_m(mask, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_m(mask, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_m(mask, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_m(mask, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_m(mask, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_m(mask, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_m(mask, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_m(mask, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t 
test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vsha2ch.c b/auto-generated/vector-crypto/api-testing/vsha2ch.c index 8407a75e1..89c32480f 100644 --- a/auto-generated/vector-crypto/api-testing/vsha2ch.c +++ b/auto-generated/vector-crypto/api-testing/vsha2ch.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t v vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vsha2cl.c b/auto-generated/vector-crypto/api-testing/vsha2cl.c index e7a37c2e7..f213d6477 100644 --- a/auto-generated/vector-crypto/api-testing/vsha2cl.c +++ b/auto-generated/vector-crypto/api-testing/vsha2cl.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t v vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vsha2ms.c b/auto-generated/vector-crypto/api-testing/vsha2ms.c index 65b6fc728..77ef0289a 100644 --- a/auto-generated/vector-crypto/api-testing/vsha2ms.c +++ b/auto-generated/vector-crypto/api-testing/vsha2ms.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t v vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl); } - diff --git 
a/auto-generated/vector-crypto/api-testing/vsm3c.c b/auto-generated/vector-crypto/api-testing/vsm3c.c index 355f4a519..67d0f776f 100644 --- a/auto-generated/vector-crypto/api-testing/vsm3c.c +++ b/auto-generated/vector-crypto/api-testing/vsm3c.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vsm3me.c b/auto-generated/vector-crypto/api-testing/vsm3me.c index 5dd3d4007..5307ba8bb 100644 --- a/auto-generated/vector-crypto/api-testing/vsm3me.c +++ b/auto-generated/vector-crypto/api-testing/vsm3me.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vsm4k.c b/auto-generated/vector-crypto/api-testing/vsm4k.c index d038e7157..a33e29d8a 100644 --- a/auto-generated/vector-crypto/api-testing/vsm4k.c +++ b/auto-generated/vector-crypto/api-testing/vsm4k.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/api-testing/vsm4r.c b/auto-generated/vector-crypto/api-testing/vsm4r.c index d690e5618..b0c2fdfe1 100644 --- a/auto-generated/vector-crypto/api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/api-testing/vsm4r.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/api-testing/vwsll.c b/auto-generated/vector-crypto/api-testing/vwsll.c index 270591974..5e6a1a884 100644 --- a/auto-generated/vector-crypto/api-testing/vwsll.c +++ b/auto-generated/vector-crypto/api-testing/vwsll.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); } @@ -124,123 +121,122 @@ vuint64m8_t 
test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_m(mask, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_m(mask, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_m(mask, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_m(mask, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_m(mask, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_m(mask, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_m(mask, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_m(mask, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_m(mask, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_m(mask, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_m(mask, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_m(mask, vs2, rs1, vl); 
+vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_m(mask, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_m(mask, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_m(mask, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_m(mask, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_m(mask, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_m(mask, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_m(mask, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_m(mask, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_m(mask, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_m(mask, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - 
return __riscv_vwsll_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index b5690f43c..4b6c01fc4 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -1,10 +1,11 @@ -## Zvbb - Vector Bit-manipulation used in Cryptography: +=== Zvbb - Vector Bit-manipulation used in Cryptography -### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): +[[]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not -**Prototypes:** -``` C +[,c] +---- vuint8mf8_t __riscv_vandn_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); vuint8mf8_t __riscv_vandn_vx_u8mf8 (vuint8mf8_t vs2, uint8_t rs1, size_t vl); vuint8mf4_t __riscv_vandn_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); @@ -50,56 +51,57 @@ vuint64m4_t __riscv_vandn_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); vuint64m8_t __riscv_vandn_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vandn_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vandn_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, 
vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): - -**Prototypes:** -``` C +vuint8mf8_t __riscv_vandn_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t 
__riscv_vandn_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Reverse Bits in Elements + +[,c] +---- vuint8mf8_t __riscv_vbrev_v_u8mf8 (vuint8mf8_t vs2, size_t vl); vuint8mf4_t __riscv_vbrev_v_u8mf4 (vuint8mf4_t vs2, size_t vl); vuint8mf2_t __riscv_vbrev_v_u8mf2 (vuint8mf2_t vs2, size_t vl); @@ -167,78 +169,79 @@ vuint64m2_t __riscv_vrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); vuint64m4_t __riscv_vrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vbrev_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, 
size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t mask, 
vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Count Bits](): - -**Prototypes:** -``` C +vuint8mf8_t __riscv_vbrev_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t 
__riscv_vbrev8_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- vuint8mf8_t __riscv_vclz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); vuint8mf4_t __riscv_vclz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); vuint8mf2_t __riscv_vclz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); @@ -284,56 +287,57 @@ vuint64m2_t __riscv_vctz_v_u64m2 (vuint64m2_t vs2, size_t vl); vuint64m4_t __riscv_vctz_v_u64m4 (vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz_v_u64m8 (vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vclz_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vclz_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vclz_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vclz_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vclz_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vclz_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vclz_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vclz_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vclz_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vclz_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vclz_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vclz_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vclz_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vclz_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); 
-vuint32m1_t __riscv_vclz_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vclz_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vclz_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vclz_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vclz_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vclz_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vclz_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vclz_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vctz_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vctz_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vctz_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vctz_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vctz_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vctz_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vctz_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vctz_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vctz_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vctz_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vctz_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vctz_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vctz_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vctz_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vctz_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vctz_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vctz_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vctz_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vctz_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vctz_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Rotate](): - -**Prototypes:** -``` C +vuint8mf8_t __riscv_vclz_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_m (vbool2_t vm, vuint16m8_t 
vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- vuint8mf8_t __riscv_vrol_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); vuint8mf8_t __riscv_vrol_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); vuint8mf4_t __riscv_vrol_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); @@ -423,100 +427,101 @@ vuint64m4_t __riscv_vror_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vror_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vror_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vrol_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_m 
(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); 
-vuint32m8_t __riscv_vror_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Shift](): - -**Prototypes:** -``` C +vuint8mf8_t __riscv_vrol_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t 
__riscv_vrol_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_m (vbool16_t vm, 
vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); @@ -548,44 +553,45 @@ vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t 
__riscv_vwsll_vv_u16m2_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -``` - -## Zvbc - Vector Carryless Multiplication: - -### [Vector Carryless Multiplication](): - -**Prototypes:** -``` C +vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t 
__riscv_vwsll_vx_u16m8_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[]] +==== Vector Carryless Multiplication + +[,c] +---- vuint64m1_t __riscv_vclmul_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); vuint64m1_t __riscv_vclmul_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); vuint64m2_t __riscv_vclmul_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); @@ -603,30 +609,31 @@ vuint64m4_t __riscv_vclmulh_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); vuint64m8_t __riscv_vclmulh_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vclmulh_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint64m1_t __riscv_vclmul_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, 
vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` - -## Zvkg - Vector GCM/GMAC: - -### [Vector GCM/GMAC](): - -**Prototypes:** -``` C +vuint64m1_t __riscv_vclmul_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[]] +==== Vector GCM/GMAC + +[,c] +---- vuint32mf2_t __riscv_vghsh_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vghsh_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vghsh_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -637,14 +644,15 @@ vuint32m1_t __riscv_vgmul_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vgmul_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvkned - NIST Suite: Vector AES Block Cipher: +=== Zvkned - NIST Suite: Vector AES Block Cipher -### [Vector AES Encryption](): +[[]] +==== Vector AES Encryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -664,7 +672,6 @@ vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4 
(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -684,13 +691,13 @@ vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES Decryption](): +[[]] +==== Vector AES Decryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -710,7 +717,6 @@ vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -730,13 +736,13 @@ vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES-128 Forward KeySchedule generation](): +[[]] +==== Vector AES-128 Forward KeySchedule generation -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaeskf1_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vaeskf1_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf1_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); @@ -747,12 +753,13 @@ vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t ui vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector AES round zero](): +[[]] +==== Vector AES round zero -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2 (vuint32mf2_t vd, 
vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); @@ -767,15 +774,15 @@ vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_ vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash -### [Vector SHA-2 message schedule](): +[[]] +==== Vector SHA-2 message schedule -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ms_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ms_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ms_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -785,12 +792,13 @@ vuint64m1_t __riscv_vsha2ms_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1 vuint64m2_t __riscv_vsha2ms_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2ms_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2ms_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -### [Vector SHA-2 two rounds of compression](): +[[]] +==== Vector SHA-2 two rounds of compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ch_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ch_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ch_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -809,25 +817,27 @@ vuint64m1_t __riscv_vsha2cl_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1 vuint64m2_t __riscv_vsha2cl_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2cl_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -## Zvksed - ShangMi Suite: SM4 Block Cipher: +=== Zvksed - ShangMi Suite: SM4 Block Cipher -### [Vector SM4 KeyExpansion](): +[[]] +==== Vector SM4 KeyExpansion -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4k_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm4k_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm4k_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm4k_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector SM4 Rounds](): +[[]] +==== Vector SM4 Rounds -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -847,29 +857,30 @@ vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t 
__riscv_vsm4r_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvksh - ShangMi Suite: SM3 Secure Hash: +=== Zvksh - ShangMi Suite: SM3 Secure Hash -### [Vector SM3 Message Expansion](): +[[]] +==== Vector SM3 Message Expansion -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3me_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsm3me_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsm3me_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); vuint32m4_t __riscv_vsm3me_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint32m8_t __riscv_vsm3me_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -``` +---- -### [Vector SM3 Message Expansion](): +[[]] +==== Vector SM3 Compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3c_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm3c_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm3c_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm3c_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm3c_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..25b9c4d67 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,586 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8 (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4 (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2 (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1 (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2 (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4 (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8 (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4 (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2 (vuint16mf2_t 
vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1 (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2 (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4 (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8 (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2 (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1 (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2 (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4 (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8 (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_m (vbool64_t vm, 
vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Reverse Bits in Elements + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2 (vuint16mf2_t vs2, size_t vl); 
+vuint16m1_t __riscv_vbrev_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t 
__riscv_vrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t 
__riscv_vbrev8_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); 
+vuint8mf4_t __riscv_vctz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_m (vbool2_t vm, 
vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t 
__riscv_vrol_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t 
vl); +vuint32m1_t __riscv_vror_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, 
size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t 
__riscv_vror_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t 
__riscv_vwsll_vx_u16m8 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t vm, vuint16m4_t vs2, 
vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md deleted file mode 100644 index 26b6260a4..000000000 --- a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ /dev/null @@ -1,581 +0,0 @@ - -## Zvbb - Vector Bit-manipulation used in Cryptography: - -### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vandn_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8 (vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4 (vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2 (vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1 (vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2 (vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4 (vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8 (vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4 (vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2 (vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1 (vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2 (vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4 (vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8 (vuint16m8_t vs2, 
uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2 (vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1 (vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2 (vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4 (vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8 (vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vandn_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, 
vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vbrev_v_u8mf8 (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4 (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2 (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1 (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2 (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4 (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8 (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4 (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2 (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1 (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2 (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4 (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8 (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2 (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1 (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2 (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4 (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8 
(vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1 (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2 (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4 (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8 (vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vbrev_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_m 
(vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t 
__riscv_vrev8_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Basic Bit-manipulation - Count Bits](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vclz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vclz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vclz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vclz_v_u8m1 (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vclz_v_u8m2 (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vclz_v_u8m4 (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vclz_v_u8m8 (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vclz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vclz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vclz_v_u16m1 (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vclz_v_u16m2 (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vclz_v_u16m4 (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vclz_v_u16m8 (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vclz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vclz_v_u32m1 (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vclz_v_u32m2 (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vclz_v_u32m4 (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vclz_v_u32m8 (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vclz_v_u64m1 (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vclz_v_u64m2 (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vclz_v_u64m4 (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vclz_v_u64m8 (vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vctz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vctz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vctz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vctz_v_u8m1 (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vctz_v_u8m2 (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vctz_v_u8m4 (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vctz_v_u8m8 (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vctz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); 
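// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch only; the surrounding `-` lines are the
// old .md copy of this listing, which the patch replaces with the .adoc file
// above): a common use of the vclz forms listed here is a per-element
// floor(log2(x)) for nonzero x, computed as 31 - clz(x). The reversed-subtract
// intrinsic __riscv_vrsub_vx_u32m1 is assumed to be available from the base V
// intrinsics; the function name floor_log2_nonzero is made up.
#include <riscv_vector.h>
#include <stdint.h>

void floor_log2_nonzero(const uint32_t *in, uint32_t *out, size_t n) {
  for (size_t vl; n > 0; n -= vl, in += vl, out += vl) {
    vl = __riscv_vsetvl_e32m1(n);
    vuint32m1_t v = __riscv_vle32_v_u32m1(in, vl);
    vuint32m1_t clz = __riscv_vclz_v_u32m1(v, vl);         // leading zeros
    vuint32m1_t lg = __riscv_vrsub_vx_u32m1(clz, 31, vl);  // 31 - clz(x)
    __riscv_vse32_v_u32m1(out, lg, vl);                    // valid for x != 0
  }
}
// ---------------------------------------------------------------------------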
-vuint16mf2_t __riscv_vctz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vctz_v_u16m1 (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vctz_v_u16m2 (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vctz_v_u16m4 (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vctz_v_u16m8 (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vctz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vctz_v_u32m1 (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vctz_v_u32m2 (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vctz_v_u32m4 (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vctz_v_u32m8 (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vctz_v_u64m1 (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vctz_v_u64m2 (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vctz_v_u64m4 (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vctz_v_u64m8 (vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vclz_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vclz_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vclz_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vclz_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vclz_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vclz_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vclz_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vclz_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vclz_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vclz_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vclz_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vclz_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vclz_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vclz_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vclz_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vclz_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vclz_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vclz_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vclz_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vclz_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vclz_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vclz_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vctz_v_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vctz_v_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vctz_v_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vctz_v_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vctz_v_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vctz_v_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vctz_v_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vctz_v_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vctz_v_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vctz_v_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t 
__riscv_vctz_v_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vctz_v_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vctz_v_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vctz_v_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vctz_v_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vctz_v_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vctz_v_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vctz_v_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vctz_v_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vctz_v_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Rotate](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vrol_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t 
__riscv_vrol_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); 
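// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not generator output): applying a
// constant rotate-right to every element with the vror_vx form listed here;
// a per-element rotate amount would use the vror_vv form instead. Assumes the
// Zvbb/Zvkb rotate support and the base V loads/stores; rotr32_by is a
// made-up name.
#include <riscv_vector.h>
#include <stdint.h>

void rotr32_by(const uint32_t *in, uint32_t *out, size_t n, size_t amount) {
  for (size_t vl; n > 0; n -= vl, in += vl, out += vl) {
    vl = __riscv_vsetvl_e32m1(n);
    vuint32m1_t v = __riscv_vle32_v_u32m1(in, vl);          // load inputs
    vuint32m1_t r = __riscv_vror_vx_u32m1(v, amount, vl);   // rotate right
    __riscv_vse32_v_u32m1(out, r, vl);                      // store results
  }
}
// ---------------------------------------------------------------------------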
-vuint32m4_t __riscv_vror_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vrol_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t 
vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_m (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_m (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t 
__riscv_vror_vx_u16m1_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_m (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_m (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_m (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_m (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_m (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Shift](): - -**Prototypes:** -``` C -vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2 (vuint16mf4_t vs2, vuint16mf4_t vs1, 
size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint32m4_t vs2, size_t rs1, size_t vl); -// masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t mask, vuint16m4_t vs2, 
size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..6e9c0a1b9 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,42 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t 
__riscv_vclmulh_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md deleted file mode 100644 index 4d41e53cc..000000000 --- a/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md +++ /dev/null @@ -1,41 +0,0 @@ - -## Zvbc - Vector Carryless Multiplication: - -### [Vector Carryless Multiplication](): - -**Prototypes:** -``` C -vuint64m1_t __riscv_vclmul_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint64m1_t __riscv_vclmul_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_m (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_m (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t 
__riscv_vclmulh_vv_u64m2_m (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_m (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_m (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` diff --git a/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md b/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc similarity index 92% rename from auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md rename to auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc index 5e3e8fcf8..83f9816cb 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -1,10 +1,11 @@ -## Zvkg - Vector GCM/GMAC: +=== Zvkg - Vector GCM/GMAC -### [Vector GCM/GMAC](): +[[]] +==== Vector GCM/GMAC -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vghsh_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vghsh_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vghsh_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -15,4 +16,4 @@ vuint32m1_t __riscv_vgmul_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vgmul_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc similarity index 92% rename from auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md rename to auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc index 5a9f440a2..929328cba 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -1,10 +1,11 @@ -## Zvkned - NIST Suite: Vector AES Block Cipher: +=== Zvkned - NIST Suite: Vector AES Block Cipher -### [Vector AES Encryption](): +[[]] +==== Vector AES Encryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -24,7 +25,6 @@ vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_u32m8 
(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -44,13 +44,13 @@ vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES Decryption](): +[[]] +==== Vector AES Decryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -70,7 +70,6 @@ vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -90,13 +89,13 @@ vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES-128 Forward KeySchedule generation](): +[[]] +==== Vector AES-128 Forward KeySchedule generation -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaeskf1_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vaeskf1_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf1_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); @@ -107,12 +106,13 @@ vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t ui vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector AES round zero](): +[[]] +==== Vector AES round zero -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); @@ -127,5 +127,4 @@ vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4 
(vuint32m4_t vd, vuint32m2_t vs2, size_ vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md b/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc similarity index 93% rename from auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md rename to auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc index 90db92cd4..6ce0c9cf6 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -1,10 +1,11 @@ -## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash -### [Vector SHA-2 message schedule](): +[[]] +==== Vector SHA-2 message schedule -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ms_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ms_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ms_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -14,12 +15,13 @@ vuint64m1_t __riscv_vsha2ms_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1 vuint64m2_t __riscv_vsha2ms_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2ms_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2ms_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -### [Vector SHA-2 two rounds of compression](): +[[]] +==== Vector SHA-2 two rounds of compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ch_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ch_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ch_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -38,4 +40,4 @@ vuint64m1_t __riscv_vsha2cl_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1 vuint64m2_t __riscv_vsha2cl_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2cl_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc similarity index 89% rename from auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md rename to auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc index ad5aeec27..55a267250 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ 
-1,21 +1,23 @@ -## Zvksed - ShangMi Suite: SM4 Block Cipher: +=== Zvksed - ShangMi Suite: SM4 Block Cipher -### [Vector SM4 KeyExpansion](): +[[]] +==== Vector SM4 KeyExpansion -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4k_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm4k_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm4k_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm4k_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector SM4 Rounds](): +[[]] +==== Vector SM4 Rounds -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -35,5 +37,4 @@ vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md b/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc similarity index 84% rename from auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md rename to auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc index 621c42e24..a83f0b809 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md +++ b/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -1,24 +1,26 @@ -## Zvksh - ShangMi Suite: SM3 Secure Hash: +=== Zvksh - ShangMi Suite: SM3 Secure Hash -### [Vector SM3 Message Expansion](): +[[]] +==== Vector SM3 Message Expansion -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3me_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsm3me_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsm3me_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); vuint32m4_t __riscv_vsm3me_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint32m8_t __riscv_vsm3me_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -``` +---- -### [Vector SM3 Message Expansion](): +[[]] +==== Vector SM3 Compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3c_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm3c_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm3c_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm3c_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm3c_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c index 04a638391..715c7881c 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c +++ 
b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32mf2_u32m8(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m1_u32m8(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, 
vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m2_u32m8(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c index ba0c355d0..c35b87b37 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32mf2_u32m2(vd, vs2, vl); } 
-vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32mf2_u32m8(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m1_u32m8(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m2_u32m8(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c index 0d1e8d720..081cfe140 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32mf2_u32m8(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32m1_u32m8(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { 
return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32m2_u32m8(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c index 79d397e54..cf43774f1 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t 
test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32mf2_u32m8(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m1_u32m8(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m2(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m2_u32m8(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c index 3b9857d2c..b92fbdead 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature 
+experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -31,4 +31,3 @@ vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c index fbb874289..aa796c5b2 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c index d022831f1..bdb19ece1 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c @@ -1,74 +1,83 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32mf2_u32m8(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m1_u32m8(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m2_u32m8(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/llvm-api-tests/vandn.c index f26790e7f..3f8f4c0a5 100644 --- 
a/auto-generated/vector-crypto/llvm-api-tests/vandn.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vandn.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -68,7 +68,8 @@ vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vandn_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vandn_vv_u16mf4(vs2, vs1, vl); } @@ -76,7 +77,8 @@ vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vandn_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vandn_vv_u16mf2(vs2, vs1, vl); } @@ -116,7 +118,8 @@ vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vandn_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vandn_vv_u32mf2(vs2, vs1, vl); } @@ -188,179 +191,222 @@ vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vandn_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_m(mask, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_m(mask, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_m(mask, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_m(mask, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_m(mask, vs2, vs1, vl); 
+vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_m(mask, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_m(mask, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_m(mask, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_m(mask, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_m(mask, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_m(mask, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_m(mask, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m8_m(mask, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_m(mask, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_m(mask, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_m(mask, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_m(mask, vs2, 
vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_m(mask, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_m(mask, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_m(mask, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_m(mask, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_m(mask, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_m(mask, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_m(mask, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_m(mask, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_m(mask, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_m(mask, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_m(mask, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t 
test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_m(mask, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_m(mask, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_m(mask, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_m(mask, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_m(mask, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_m(mask, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_m(mask, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_m(mask, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t 
vl) { + return __riscv_vandn_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c index aa1f7a0e2..602551b22 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vbrev.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vbrev_v_u64m8(vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(vm, vs2, vl); } -vuint8m2_t 
test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(vm, vs2, vl); 
} -vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c index 2ac7b751b..dbb64b45e 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vbrev8_v_u64m8(vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_m(vm, vs2, vl); } 
-vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, 
vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c index 3751cde48..d6697a372 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vclmul.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -44,35 +44,42 @@ vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t 
vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c index 5d0417a59..94fbc51e7 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -44,35 +44,42 @@ vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t 
test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/llvm-api-tests/vclz.c index 80e369c76..6320cf1a7 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vclz.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vclz.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vclz_v_u64m8(vs2, vl); } -vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vclz_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vclz_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vclz_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vclz_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return 
__riscv_vclz_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vclz_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vclz_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vclz_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vclz_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vclz_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vclz_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vclz_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vclz_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vclz_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vclz_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vclz_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vclz_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vclz_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vclz_v_u64m1_m(mask, vs2, vl); 
+vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vclz_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vclz_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vclz_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/llvm-api-tests/vctz.c index 74863e79c..926741260 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vctz.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vctz.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vctz_v_u64m8(vs2, vl); } -vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vctz_v_u8mf8_m(mask, vs2, vl); +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vctz_v_u8mf4_m(mask, vs2, vl); +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vctz_v_u8mf2_m(mask, vs2, vl); +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vctz_v_u8m1_m(mask, vs2, vl); +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vctz_v_u8m2_m(mask, vs2, vl); +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vctz_v_u8m4_m(mask, vs2, vl); +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + 
return __riscv_vctz_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vctz_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vctz_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vctz_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vctz_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vctz_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vctz_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vctz_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vctz_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vctz_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vctz_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vctz_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vctz_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vctz_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vctz_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return 
__riscv_vctz_v_u64m2_m(vm, vs2, vl);
 }

-vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vctz_v_u64m4_m(mask, vs2, vl);
+vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m4_m(vm, vs2, vl);
 }

-vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vctz_v_u64m8_m(mask, vs2, vl);
+vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m8_m(vm, vs2, vl);
 }
-
diff --git a/auto-generated/vector-crypto/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c
index 436349fb9..6b2db98f6 100644
--- a/auto-generated/vector-crypto/llvm-api-tests/vghsh.c
+++ b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c
@@ -1,34 +1,38 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN: -target-feature +experimental-zvbb \
-// RUN: -target-feature +experimental-zvbc \
-// RUN: -target-feature +experimental-zvkg \
-// RUN: -target-feature +experimental-zvkned \
-// RUN: -target-feature +experimental-zvknhb \
-// RUN: -target-feature +experimental-zvksed \
-// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN: -target-feature +zvbb \
+// RUN: -target-feature +zvbc \
+// RUN: -target-feature +zvkg \
+// RUN: -target-feature +zvkned \
+// RUN: -target-feature +zvknhb \
+// RUN: -target-feature +zvksed \
+// RUN: -target-feature +zvksh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                  vuint32mf2_t vs1, size_t vl) {
   return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2,
+                                vuint32m1_t vs1, size_t vl) {
   return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2,
+                                vuint32m2_t vs1, size_t vl) {
   return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2,
+                                vuint32m4_t vs1, size_t vl) {
   return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2,
+                                vuint32m8_t vs1, size_t vl) {
   return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl);
 }
-
diff --git a/auto-generated/vector-crypto/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c
index 502aae3f8..1abf16248 100644
--- a/auto-generated/vector-crypto/llvm-api-tests/vgmul.c
+++ b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c
@@ -1,18 +1,19 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN: -target-feature +experimental-zvbb \
-// RUN: -target-feature +experimental-zvbc \
-// RUN: -target-feature +experimental-zvkg \
-// RUN: -target-feature +experimental-zvkned \
-// RUN: -target-feature +experimental-zvknhb \
-// RUN: -target-feature +experimental-zvksed \
-// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN: -target-feature +zvbb \
+// RUN: -target-feature +zvbc \
+// RUN: -target-feature +zvkg \
+// RUN: -target-feature +zvkned \
+// RUN: -target-feature +zvknhb \
+// RUN: -target-feature +zvksed \
+// RUN: -target-feature +zvksh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                  size_t vl) {
   return __riscv_vgmul_vv_u32mf2(vd, vs2, vl);
 }

@@ -31,4 +32,3 @@ vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
 vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
   return __riscv_vgmul_vv_u32m8(vd, vs2, vl);
 }
-
diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c
index d02393633..717dfd27d 100644
--- a/auto-generated/vector-crypto/llvm-api-tests/vrev8.c
+++ b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c
@@ -1,12 +1,12 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN: -target-feature +experimental-zvbb \
-// RUN: -target-feature +experimental-zvbc \
-// RUN: -target-feature +experimental-zvkg \
-// RUN: -target-feature +experimental-zvkned \
-// RUN: -target-feature +experimental-zvknhb \
-// RUN: -target-feature +experimental-zvksed \
-// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN: -target-feature +zvbb \
+// RUN: -target-feature +zvbc \
+// RUN: -target-feature +zvkg \
+// RUN: -target-feature +zvkned \
+// RUN: -target-feature +zvknhb \
+// RUN: -target-feature +zvksed \
+// RUN: -target-feature +zvksh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

@@ -100,91 +100,90 @@ vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) {
   return __riscv_vrev8_v_u64m8(vs2, vl);
 }

-vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8mf8_m(mask, vs2, vl);
+vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf8_m(vm, vs2, vl);
 }

-vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8mf4_m(mask, vs2, vl);
+vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf4_m(vm, vs2, vl);
 }

-vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8mf2_m(mask, vs2, vl);
+vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf2_m(vm, vs2, vl);
 }

-vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8m1_m(mask, vs2, vl);
+vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m1_m(vm, vs2, vl);
 }

-vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8m2_m(mask, vs2, vl);
+vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m2_m(vm, vs2, vl);
 }

-vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8m4_m(mask, vs2, vl);
+vuint8m4_t
test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_m(mask, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_m(mask, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_m(mask, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_m(mask, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_m(mask, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_m(mask, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_m(mask, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_m(mask, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_m(mask, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_m(mask, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_m(mask, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_m(mask, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_m(mask, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return 
__riscv_vrev8_v_u64m2_m(mask, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_m(mask, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_m(mask, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/llvm-api-tests/vrol.c index d02ca2e49..1bddb1516 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vrol.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vrol.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -68,7 +68,8 @@ vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { return __riscv_vrol_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrol_vv_u16mf4(vs2, vs1, vl); } @@ -76,7 +77,8 @@ vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vrol_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrol_vv_u16mf2(vs2, vs1, vl); } @@ -116,7 +118,8 @@ vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vrol_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrol_vv_u32mf2(vs2, vs1, vl); } @@ -188,179 +191,222 @@ vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vrol_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_m(mask, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_m(mask, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return 
__riscv_vrol_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_m(mask, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_m(mask, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_m(mask, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_m(mask, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_m(mask, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_m(mask, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_m(mask, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_m(mask, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_m(mask, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_m(mask, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_m(mask, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_m(mask, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t 
test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_m(mask, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_m(mask, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_m(mask, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_m(mask, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_m(mask, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_m(mask, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_m(mask, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_m(mask, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_m(mask, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_m(mask, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_m(mask, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_m(mask, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return 
__riscv_vrol_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_m(mask, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_m(mask, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_m(mask, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_m(mask, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_m(mask, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_m(mask, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_m(mask, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_m(mask, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_m(mask, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_m(mask, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + 
size_t vl) { + return __riscv_vrol_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vror.c b/auto-generated/vector-crypto/llvm-api-tests/vror.c index d800a671e..073c1fe05 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vror.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vror.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -68,7 +68,8 @@ vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { return __riscv_vror_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vror_vv_u16mf4(vs2, vs1, vl); } @@ -76,7 +77,8 @@ vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vror_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t 
test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vror_vv_u16mf2(vs2, vs1, vl); } @@ -116,7 +118,8 @@ vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vror_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vror_vv_u32mf2(vs2, vs1, vl); } @@ -188,179 +191,222 @@ vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vror_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_m(mask, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_m(mask, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_m(mask, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_m(mask, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_m(mask, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_m(mask, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_m(mask, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_m(mask, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_m(mask, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_m(mask, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t 
test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_m(mask, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_m(mask, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_m(mask, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_m(mask, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_m(mask, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_m(mask, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_m(mask, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_m(mask, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_m(mask, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_m(mask, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_m(mask, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_m(mask, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t 
test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_m(mask, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_m(mask, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_m(mask, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_m(mask, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_m(mask, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_m(mask, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_m(mask, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_m(mask, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_m(mask, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_m(mask, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_m(mask, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_m(mask, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m4_m(vm, vs2, rs1, vl); 
} -vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_m(mask, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_m(mask, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c index b0a9e0220..78924df94 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature 
+experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u64m4(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c index ab5430e22..739a9da5e 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature 
+experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c index 0d65884e1..72201942a 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: 
-target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c index c3589f4af..06ae64701 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: 
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c index a286c7c26..9aefcd323 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c index 33d5ab701..e5f6bd386 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -31,4 +31,3 @@ vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c index 
59aece912..8119a4331 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32mf2_u32mf2(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32mf2_u32m1(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32mf2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32mf2_u32m8(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m1_u32m1(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m1_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m1_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m1_u32m8(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); } 
-vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m2_u32m2(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m2_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m2_u32m8(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m4_u32m4(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vsm4r_vs_u32m4_u32m8(vd, vs2, vl); } vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); } - -vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_u32m8(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c index d7b1d42bb..eda2e00d3 100644 --- a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c +++ b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -60,7 +60,8 @@ vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); } @@ -132,123 +133,152 @@ vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_m(mask, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { 
- return __riscv_vwsll_vx_u16mf4_m(mask, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_m(mask, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_m(mask, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_m(mask, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_m(mask, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_m(mask, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_m(mask, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_m(mask, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_m(mask, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_m(mask, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_m(mask, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_m(mask, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t 
test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_m(mask, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_m(mask, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_m(mask, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_m(mask, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_m(mask, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_m(mask, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_m(mask, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_m(mask, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_m(mask, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_m(mask, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m1_m(mask, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_m(mask, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { 
+ return __riscv_vwsll_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m2_m(mask, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_m(mask, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_m(mask, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_m(mask, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_m(mask, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m8_m(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c index 44803c47c..83837f66d 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return 
__riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vs(vd, vs2, vl); } vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } - -vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c index bcb5a0b36..6bc6faa5b 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature 
+experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return 
__riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vs(vd, vs2, vl); } vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } - -vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c index 2768f4a6b..a42aac84e 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -40,19 
+46,23 @@ vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vs(vd, vs2, vl); } vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } - -vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c index 1cbdce0bc..2cb5113a7 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ 
+// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t 
test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vs(vd, vs2, vl); } vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } - -vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c index f1048cd5a..393a2329f 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -31,4 +31,3 @@ vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c index 3387d50cf..e1d85453a 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaeskf2(vd, vs2, 0, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2(vd, vs2, 0, vl); } - diff --git 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c index 93e2e3eef..b98fe52ba 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c @@ -1,74 +1,83 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { 
+vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesz(vd, vs2, vl); } - -vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c index 7a0f664b0..302997b03 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -68,7 +68,8 @@ vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vandn(vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vandn(vs2, vs1, vl); } @@ -76,7 +77,8 @@ vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vandn(vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vandn(vs2, vs1, vl); } @@ -116,7 +118,8 @@ vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vandn(vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vandn(vs2, vs1, vl); } @@ -188,179 +191,222 @@ vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vandn(vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8mf4_t 
test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, 
vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); 
+vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t 
mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c index 579baaf6d..9654a13b5 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vbrev(vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, 
vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, 
size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c index 980e7f4ae..68503540f 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vbrev8(vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, 
vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c index b8143f3f0..994a54025 100644 --- 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -44,35 +44,42 @@ vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vclmul(vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul(mask, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul(mask, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul(mask, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul(mask, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul(mask, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul(mask, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul(mask, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul(mask, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c index a17c3752e..fbfa406f6 
100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -44,35 +44,42 @@ vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vclmulh(vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c 
b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c index fb2c59218..c6a727dfc 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vclz(vs2, vl); } -vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t 
vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c index cfaa556ee..10223ef94 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ 
+// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vctz(vs2, vl); } -vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32mf2_t 
test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c index afbd4dbc8..bd18a2c0e 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c @@ -1,34 +1,38 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vghsh(vd, vs2, vs1, vl); } -vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return 
__riscv_vghsh(vd, vs2, vs1, vl); } -vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vghsh(vd, vs2, vs1, vl); } -vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vghsh(vd, vs2, vs1, vl); } -vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vghsh(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c index a091c69a7..ed81badec 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vgmul(vd, vs2, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vgmul(vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c index 3e29204b8..6f491581c 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -100,91 +100,90 @@ vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { return 
__riscv_vrev8(vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } 
-vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c index 1007fbea4..2e24afa14 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -68,7 +68,8 @@ vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { return __riscv_vrol(vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrol(vs2, vs1, vl); } @@ -76,7 +77,8 @@ vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vrol(vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrol(vs2, vs1, vl); } @@ -116,7 +118,8 @@ vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { return 
__riscv_vrol(vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrol(vs2, vs1, vl); } @@ -188,179 +191,222 @@ vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vrol(vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t 
rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, 
vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint64m1_t 
test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c index 5221bd3aa..6fdd3e527 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -68,7 +68,8 @@ vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { return __riscv_vror(vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vror(vs2, vs1, vl); } @@ -76,7 +77,8 @@ vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vror(vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return 
__riscv_vror(vs2, vs1, vl); } @@ -116,7 +118,8 @@ vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vror(vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vror(vs2, vs1, vl); } @@ -188,179 +191,222 @@ vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vror(vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t 
rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t 
vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint64m1_t 
test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c index 4c3c170db..2924cdc47 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, 
vl); } -vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c index ab349951b..b2078e33d 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { 
return __riscv_vsha2cl(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c index 4f3c31a0d..e1afaede7 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t 
vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c index 115cbf5f7..3d23e0142 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c index 3a37cbefc..86f271de7 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: 
-target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsm3me(vs2, vs1, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vsm3me(vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c index 5ae9fdf14..248207cfc 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -31,4 +31,3 @@ vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c index 079db5b1f..6cb46317c 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { 
return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vsm4r_vs(vd, vs2, vl); } vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, 
vs2, vl); } - -vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c index eb6e5858f..029180986 100644 --- a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -60,7 +60,8 @@ vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } @@ -132,123 +133,152 @@ vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return 
__riscv_vwsll(mask, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32m4_t 
test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c index 9807668e4..a240f30cd 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { 
return __riscv_vaesdf_vv(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t v vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv(vd, vs2, vl); } - -vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c index d9cd8ced8..44e4a38fb 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t v vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv(vd, vs2, vl); } - -vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c index 96380b425..8a032c2f8 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t v vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv(vd, vs2, vl); } - -vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c index 4539af8cd..e6f666ea6 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t v vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv(vd, vs2, vl); } - -vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c index 8ec38cde4..73358e70e 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t 
test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf1(vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c index 94ff06c1a..a15310d57 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2(vd, vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c index cd9069a7e..76a5d32fc 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } @@ -59,8 +56,3 @@ vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz(vd, vs2, vl); } - -vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vandn.c b/auto-generated/vector-crypto/overloaded-api-testing/vandn.c index e744cd9fe..61d7a594f 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vandn.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vandn.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vandn(vs2, vs1, vl); } @@ -180,179 +177,178 @@ vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vandn(vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - 
return __riscv_vandn(mask, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t 
vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } 
-vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t 
rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn(mask, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn(mask, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c b/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c index 8c82c5496..5a27daa73 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vbrev(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vbrev(vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return 
__riscv_vbrev(vm, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev(mask, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c b/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c index 5785a810f..9d0d77b91 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vbrev8(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vbrev8(vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t 
vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32m2_t 
test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8(mask, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c b/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c index f751b2175..cf48adf9c 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { return __riscv_vclmul(vs2, vs1, vl); } @@ -36,35 +33,34 @@ vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vclmul(vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul(mask, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul(mask, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul(mask, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul(mask, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t 
vs1, size_t vl) { - return __riscv_vclmul(mask, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul(mask, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul(mask, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul(mask, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c b/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c index c7a9d9d6d..7000a93e5 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { return __riscv_vclmulh(vs2, vs1, vl); } @@ -36,35 +33,34 @@ vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vclmulh(vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { 
- return __riscv_vclmulh(mask, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh(mask, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclz.c b/auto-generated/vector-crypto/overloaded-api-testing/vclz.c index 8bea51126..d93faf0f3 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vclz.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclz.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vclz(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vclz(vs2, vl); } -vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, 
vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } -vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vclz(mask, vs2, vl); +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vctz.c b/auto-generated/vector-crypto/overloaded-api-testing/vctz.c index 86090d8aa..51d6c57e9 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vctz.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vctz.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vctz(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vctz(vs2, vl); } -vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return 
__riscv_vctz(mask, vs2, vl); +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, 
size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } -vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vctz(mask, vs2, vl); +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c b/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c index 8a4eb46a5..055ce6727 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vghsh(vd, vs2, vs1, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1 vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vghsh(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c b/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c index 48c480933..4067ca01b 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vgmul(vd, vs2, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vgmul(vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c b/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c index d013b9218..3391569f2 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { return __riscv_vrev8(vs2, vl); } @@ -92,91 +89,90 @@ vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { return __riscv_vrev8(vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, 
vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + 
return __riscv_vrev8(vm, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8(mask, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vrol.c b/auto-generated/vector-crypto/overloaded-api-testing/vrol.c index dda6195ca..a1900207c 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vrol.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vrol.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vrol(vs2, vs1, vl); } @@ -180,179 +177,178 @@ vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vrol(vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t 
rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol(mask, 
vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, 
vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol(mask, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol(mask, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vror.c b/auto-generated/vector-crypto/overloaded-api-testing/vror.c index 
600fc1d66..e87ad43c8 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vror.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vror.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vror(vs2, vs1, vl); } @@ -180,179 +177,178 @@ vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vror(vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t 
rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, 
size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, 
size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror(mask, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror(mask, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c index e581f6f43..d04129849 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t v vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ch(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c index 9a839357b..4de7b49aa 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t v vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2cl(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c 
b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c index c6d912d62..70a696804 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t v vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ms(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c index a5bdb447f..728566e46 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c index 60d967f88..299159174 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsm3me(vs2, vs1, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vsm3me(vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c index 06728e8dd..882694054 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { return __riscv_vsm4k(vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c index 66735b96c..cb106c8a5 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t 
test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv(vd, vs2, vl); } - -vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c index f90328a94..8696b7d1d 100644 --- a/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c +++ b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { return __riscv_vwsll(vs2, vs1, vl); } @@ -124,123 +121,122 @@ vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vwsll(vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t vs2, 
size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint64m1_t 
test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll(mask, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll(mask, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 6906c44bd..63ef95b6a 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -1,10 +1,11 @@ -## Zvbb - Vector Bit-manipulation used in Cryptography: +=== Zvbb - Vector Bit-manipulation used in Cryptography -### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not -**Prototypes:** -``` C +[,c] +---- vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, uint8_t rs1, size_t vl); vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); @@ -50,56 +51,57 @@ vuint64m4_t __riscv_vandn (vuint64m4_t vs2, uint64_t rs1, size_t vl); vuint64m8_t __riscv_vandn (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vandn (vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vandn (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn (vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, 
size_t vl); -vuint8mf4_t __riscv_vandn (vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn (vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn (vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn (vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn (vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn (vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn (vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn (vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn (vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn (vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn (vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn (vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn (vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn (vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn (vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn (vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn (vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn 
(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): - -**Prototypes:** -``` C +vuint8mf8_t __riscv_vandn (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t vm, 
vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Reverse Bits in Elements + +[,c] +---- vuint8mf8_t __riscv_vbrev (vuint8mf8_t vs2, size_t vl); vuint8mf4_t __riscv_vbrev (vuint8mf4_t vs2, size_t vl); vuint8mf2_t __riscv_vbrev (vuint8mf2_t vs2, size_t vl); @@ -167,78 +169,79 @@ vuint64m2_t __riscv_vrev8 (vuint64m2_t vs2, size_t vl); vuint64m4_t __riscv_vrev8 (vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vrev8 (vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vbrev (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8 (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8 (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8 (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8 (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8 (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8 (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8 (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8 (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8 (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8 (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8 (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8 (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8 (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8 (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8 (vbool32_t mask, vuint32m1_t vs2, 
size_t vl); -vuint32m2_t __riscv_vbrev8 (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8 (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8 (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8 (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8 (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8 (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8 (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8 (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8 (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8 (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8 (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8 (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8 (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8 (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8 (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8 (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8 (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8 (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8 (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8 (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8 (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8 (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8 (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8 (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8 (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8 (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8 (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8 (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8 (vbool8_t mask, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Count Bits](): - -**Prototypes:** -``` C +vuint8mf8_t __riscv_vbrev (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vbool64_t vm, vuint64m1_t vs2, size_t 
vl); +vuint64m2_t __riscv_vbrev (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- vuint8mf8_t __riscv_vclz (vuint8mf8_t vs2, size_t vl); vuint8mf4_t __riscv_vclz (vuint8mf4_t vs2, size_t vl); vuint8mf2_t 
__riscv_vclz (vuint8mf2_t vs2, size_t vl); @@ -284,56 +287,57 @@ vuint64m2_t __riscv_vctz (vuint64m2_t vs2, size_t vl); vuint64m4_t __riscv_vctz (vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz (vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vclz (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vclz (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vclz (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vclz (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vclz (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vclz (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vclz (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vclz (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vclz (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vclz (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vclz (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vclz (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vclz (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vclz (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vclz (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vclz (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vclz (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vclz (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vclz (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vclz (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vclz (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vclz (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vctz (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vctz (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vctz (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vctz (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vctz (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vctz (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vctz (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vctz (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vctz (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vctz (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vctz (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vctz (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vctz (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vctz (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vctz (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vctz (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vctz (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vctz (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vctz (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vctz (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vctz (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vctz (vbool8_t mask, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Rotate](): - -**Prototypes:** -``` C +vuint8mf8_t __riscv_vclz (vbool64_t vm, vuint8mf8_t vs2, 
size_t vl); +vuint8mf4_t __riscv_vclz (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, size_t rs1, size_t vl); vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); @@ -423,100 +427,101 @@ vuint64m4_t __riscv_vror (vuint64m4_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vror (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vror (vuint64m8_t vs2, size_t rs1, size_t vl); // masked 
functions -vuint8mf8_t __riscv_vrol (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t 
__riscv_vrol (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror (vbool32_t mask, 
vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Shift](): - -**Prototypes:** -``` C +vuint8mf8_t __riscv_vrol (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t vm, 
vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t vm, vuint32m4_t vs2, size_t rs1, 
size_t vl); +vuint32m8_t __riscv_vror (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, size_t rs1, size_t vl); vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); @@ -548,44 +553,45 @@ vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll (vbool32_t 
mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -``` - -## Zvbc - Vector Carryless Multiplication: - -### [Vector Carryless Multiplication](): - -**Prototypes:** -``` C +vuint16mf4_t __riscv_vwsll (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[overloaded-]] +==== Vector Carryless Multiplication + +[,c] +---- vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, uint64_t rs1, size_t vl); vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); @@ -603,30 +609,31 @@ vuint64m4_t 
__riscv_vclmulh (vuint64m4_t vs2, uint64_t rs1, size_t vl); vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint64m1_t __riscv_vclmul (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` - -## Zvkg - Vector GCM/GMAC: - -### [Vector GCM/GMAC](): - -**Prototypes:** -``` C +vuint64m1_t __riscv_vclmul (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[overloaded-]] +==== Vector GCM/GMAC + +[,c] +---- vuint32mf2_t __riscv_vghsh (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vghsh (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vghsh (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -637,14 +644,15 @@ vuint32m1_t 
__riscv_vgmul (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vgmul (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvkned - NIST Suite: Vector AES Block Cipher: +=== Zvkned - NIST Suite: Vector AES Block Cipher -### [Vector AES Encryption](): +[[overloaded-]] +==== Vector AES Encryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesef_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -664,7 +672,6 @@ vuint32m4_t __riscv_vaesef_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -684,13 +691,13 @@ vuint32m4_t __riscv_vaesem_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES Decryption](): +[[overloaded-]] +==== Vector AES Decryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesdf_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -710,7 +717,6 @@ vuint32m4_t __riscv_vaesdf_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -730,13 +736,13 @@ vuint32m4_t __riscv_vaesdm_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES-128 Forward KeySchedule generation](): +[[overloaded-]] +==== Vector AES-128 Forward KeySchedule generation -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaeskf1 (vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vaeskf1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf1 (vuint32m2_t vs2, size_t uimm, size_t 
vl); @@ -747,12 +753,13 @@ vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_ vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector AES round zero](): +[[overloaded-]] +==== Vector AES round zero -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); @@ -767,15 +774,15 @@ vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash -### [Vector SHA-2 message schedule](): +[[overloaded-]] +==== Vector SHA-2 message schedule -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ms (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ms (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ms (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -785,12 +792,13 @@ vuint64m1_t __riscv_vsha2ms (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, s vuint64m2_t __riscv_vsha2ms (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2ms (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2ms (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -### [Vector SHA-2 two rounds of compression](): +[[overloaded-]] +==== Vector SHA-2 two rounds of compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ch (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ch (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ch (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -809,25 +817,27 @@ vuint64m1_t __riscv_vsha2cl (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, s vuint64m2_t __riscv_vsha2cl (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2cl (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -## Zvksed - ShangMi Suite: SM4 Block Cipher: +=== Zvksed - ShangMi Suite: SM4 Block Cipher -### [Vector SM4 KeyExpansion](): +[[overloaded-]] +==== Vector SM4 KeyExpansion -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4k (vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm4k (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm4k (vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm4k (vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector SM4 Rounds](): +[[overloaded-]] +==== Vector SM4 Rounds -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, 
size_t vl); vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -847,29 +857,30 @@ vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvksh - ShangMi Suite: SM3 Secure Hash: +=== Zvksh - ShangMi Suite: SM3 Secure Hash -### [Vector SM3 Message Expansion](): +[[overloaded-]] +==== Vector SM3 Message Expansion -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3me (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsm3me (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsm3me (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); vuint32m4_t __riscv_vsm3me (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint32m8_t __riscv_vsm3me (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -``` +---- -### [Vector SM3 Message Expansion](): +[[overloaded-]] +==== Vector SM3 Compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3c (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm3c (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm3c (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm3c (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm3c (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..15ebc0022 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,586 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t 
vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn 
(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Reverse Bits in Elements + +[,c] +---- +vuint8mf8_t __riscv_vbrev (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vuint64m8_t 
vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vbool64_t vm, vuint32mf2_t vs2, size_t vl); 
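// Usage sketch (illustrative assumption, assuming <riscv_vector.h> and a toolchain
// with the Zvbb/Zvkb extension enabled): reverse the bits inside every byte of a
// buffer with the overloaded __riscv_vbrev8 prototyped above.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void bitrev_bytes(uint8_t *dst, const uint8_t *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e8m8(n);                  // elements processed this pass
    vuint8m8_t v = __riscv_vle8_v_u8m8(src, vl);  // unit-stride load
    v = __riscv_vbrev8(v, vl);                    // reverse bit order within each byte
    __riscv_vse8(dst, v, vl);                     // unit-stride store
  }
}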
+vuint32m1_t __riscv_vbrev (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vbool32_t vm, vuint64m2_t vs2, size_t vl); 
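// Usage sketch (illustrative, under the same Zvbb/Zvkb assumption): the overloaded
// __riscv_vrev8 reverses the byte order inside each element, which gives a
// vectorized 32-bit endianness swap.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void bswap32_buf(uint32_t *dst, const uint32_t *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e32m4(n);
    vuint32m4_t v = __riscv_vle32_v_u32m4(src, vl);  // SEW=32 load
    v = __riscv_vrev8(v, vl);                        // swap bytes within each element
    __riscv_vse32(dst, v, vl);
  }
}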
+vuint64m4_t __riscv_vrev8 (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t 
__riscv_vclz (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, size_t 
rs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t 
__riscv_vror (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t vm, 
vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t vm, vuint16m4_t vs2, size_t rs1, 
size_t vl); +vuint16m8_t __riscv_vror (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll 
(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md deleted file mode 100644 index dfe321e52..000000000 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ /dev/null @@ -1,581 +0,0 @@ - -## Zvbb - Vector Bit-manipulation used in Cryptography: - -### [Vector Bit-manipulation used in Cryptography - 
Bitwise And-Not](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn (vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn (vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn (vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn (vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn (vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn (vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn (vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn (vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn (vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn (vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn (vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn (vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn (vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn (vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn (vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn (vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vandn (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn (vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn (vbool32_t mask, 
vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn (vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn (vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn (vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn (vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn (vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn (vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn (vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn (vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn (vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn (vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn (vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn (vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn (vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn (vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn (vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn (vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); 
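// Usage sketch (illustrative, assuming Zvbb/Zvkb and <riscv_vector.h>): the
// overloaded __riscv_vandn computes vs2 & ~vs1 (or vs2 & ~rs1 for the scalar
// form); the leading-vbool overloads only update the lanes selected by the mask,
// so the result is stored back through the same mask here.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void clear_flags_in_nonzero_bytes(uint8_t *buf, size_t n, uint8_t flags) {
  for (size_t vl; n > 0; n -= vl, buf += vl) {
    vl = __riscv_vsetvl_e8m4(n);
    vuint8m4_t v = __riscv_vle8_v_u8m4(buf, vl);
    vbool2_t nonzero = __riscv_vmsne(v, 0, vl);          // lanes to update
    vuint8m4_t r = __riscv_vandn(nonzero, v, flags, vl); // r = v & ~flags in active lanes
    __riscv_vse8(nonzero, buf, r, vl);                   // masked store
  }
}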
-``` - -### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vbrev (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev (vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8 (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8 (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8 (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8 (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8 (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8 (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8 (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8 (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8 (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8 (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8 (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8 (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8 (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8 (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8 (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8 (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8 (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8 (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8 (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8 (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8 (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8 (vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8 (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8 (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8 (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8 (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8 (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8 (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8 (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8 (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8 (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8 (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8 (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8 (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8 (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8 (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8 (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8 (vuint32m2_t vs2, size_t vl); -vuint32m4_t 
__riscv_vrev8 (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8 (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8 (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8 (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8 (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8 (vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vbrev (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8 (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8 (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8 (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8 (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8 (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8 (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8 (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8 (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8 (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8 (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8 (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8 (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8 (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8 (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8 (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8 (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8 (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8 (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8 (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8 (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8 (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8 (vbool8_t mask, vuint64m8_t vs2, size_t 
vl); -vuint8mf8_t __riscv_vrev8 (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8 (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8 (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8 (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8 (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8 (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8 (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8 (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8 (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8 (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8 (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8 (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8 (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8 (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8 (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8 (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8 (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8 (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8 (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8 (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8 (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8 (vbool8_t mask, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Count Bits](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vclz (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vclz (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vclz (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vclz (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vclz (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vclz (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vclz (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vclz (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vclz (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vclz (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vclz (vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vclz (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vclz (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vclz (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vclz (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vclz (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vclz (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vclz (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vclz (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vclz (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vclz (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vclz (vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vctz (vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vctz (vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vctz (vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vctz (vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vctz (vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vctz (vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vctz (vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vctz (vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vctz (vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vctz (vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vctz (vuint16m2_t vs2, size_t vl); -vuint16m4_t 
__riscv_vctz (vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vctz (vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vctz (vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vctz (vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vctz (vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vctz (vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vctz (vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vctz (vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vctz (vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vctz (vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vctz (vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vclz (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vclz (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vclz (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vclz (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vclz (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vclz (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vclz (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vclz (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vclz (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vclz (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vclz (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vclz (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vclz (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vclz (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vclz (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vclz (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vclz (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vclz (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vclz (vbool64_t mask, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vclz (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vclz (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vclz (vbool8_t mask, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vctz (vbool64_t mask, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vctz (vbool32_t mask, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vctz (vbool16_t mask, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vctz (vbool8_t mask, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vctz (vbool4_t mask, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vctz (vbool2_t mask, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vctz (vbool1_t mask, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vctz (vbool64_t mask, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vctz (vbool32_t mask, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vctz (vbool16_t mask, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vctz (vbool8_t mask, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vctz (vbool4_t mask, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vctz (vbool2_t mask, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vctz (vbool64_t mask, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vctz (vbool32_t mask, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vctz (vbool16_t mask, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vctz (vbool8_t mask, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vctz (vbool4_t mask, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vctz (vbool64_t mask, vuint64m1_t vs2, size_t vl); 
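// Usage sketch (illustrative, assuming Zvbb and <riscv_vector.h>): per-element
// leading-zero count with the overloaded __riscv_vclz; __riscv_vctz is called
// the same way for trailing zeros. Elements equal to zero return SEW (here 32).
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void lzcnt32_buf(uint32_t *dst, const uint32_t *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e32m8(n);
    vuint32m8_t v = __riscv_vle32_v_u32m8(src, vl);
    vuint32m8_t lz = __riscv_vclz(v, vl);   // count leading zero bits per element
    __riscv_vse32(dst, lz, vl);
  }
}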
-vuint64m2_t __riscv_vctz (vbool32_t mask, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vctz (vbool16_t mask, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vctz (vbool8_t mask, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Rotate](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol (vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol (vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol (vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol (vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol (vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol (vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol (vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol (vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol (vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol (vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol (vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol (vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol (vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol (vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol (vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol (vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, size_t rs1, size_t vl); 
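// Usage sketch (illustrative, assuming Zvbb/Zvkb): rotate each element right by
// a runtime amount with the overloaded __riscv_vror; only the low log2(SEW) bits
// of the rotate amount are used. The vector-vector overload takes a per-element
// amount in vs1, and __riscv_vrol rotates left with the same operand shapes.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void ror32_buf(uint32_t *dst, const uint32_t *src, size_t n, size_t amount) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e32m8(n);
    vuint32m8_t v = __riscv_vle32_v_u32m8(src, vl);
    v = __riscv_vror(v, amount, vl);   // rotate right by `amount` in every element
    __riscv_vse32(dst, v, vl);
  }
}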
-vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror (vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror (vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror (vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror (vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror (vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror (vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror (vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror (vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror (vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror (vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror (vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror (vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror (vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror (vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror (vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror (vuint64m8_t vs2, size_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vrol (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol (vbool8_t mask, vuint8m1_t vs2, 
vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); 
-vuint8mf2_t __riscv_vror (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror (vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror (vbool1_t mask, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror (vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror (vbool2_t mask, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror (vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror (vbool4_t mask, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror (vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror (vbool32_t mask, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror (vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror (vbool8_t mask, vuint64m8_t vs2, size_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Shift](): - -**Prototypes:** -``` C -vuint16mf4_t 
__riscv_vwsll (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); -// masked functions -vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll (vbool64_t mask, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll (vbool32_t mask, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll (vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll (vbool8_t mask, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll (vbool4_t mask, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll (vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll (vbool64_t mask, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll (vbool32_t mask, vuint16mf2_t vs2, size_t rs1, size_t vl); 
-vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll (vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll (vbool8_t mask, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll (vbool4_t mask, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll (vbool64_t mask, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll (vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll (vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll (vbool8_t mask, vuint32m4_t vs2, size_t rs1, size_t vl); -``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..174233382 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,42 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[overloaded-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t 
vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md deleted file mode 100644 index df952e521..000000000 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md +++ /dev/null @@ -1,41 +0,0 @@ - -## Zvbc - Vector Carryless Multiplication: - -### [Vector Carryless Multiplication](): - -**Prototypes:** -``` C -vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint64m1_t __riscv_vclmul (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh (vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh (vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh (vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t 
__riscv_vclmulh (vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh (vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh (vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc similarity index 91% rename from auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md rename to auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc index 0b3bf1254..3b38c6571 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -1,10 +1,11 @@ -## Zvkg - Vector GCM/GMAC: +=== Zvkg - Vector GCM/GMAC -### [Vector GCM/GMAC](): +[[overloaded-]] +==== Vector GCM/GMAC -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vghsh (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vghsh (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vghsh (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -15,4 +16,4 @@ vuint32m1_t __riscv_vgmul (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vgmul (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc similarity index 91% rename from auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md rename to auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc index b750c129f..407f673d9 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -1,10 +1,11 @@ -## Zvkned - NIST Suite: Vector AES Block Cipher: +=== Zvkned - NIST Suite: Vector AES Block Cipher -### [Vector AES Encryption](): +[[overloaded-]] +==== Vector AES Encryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesef_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -24,7 +25,6 @@ vuint32m4_t __riscv_vaesef_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs 
(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -44,13 +44,13 @@ vuint32m4_t __riscv_vaesem_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES Decryption](): +[[overloaded-]] +==== Vector AES Decryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesdf_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -70,7 +70,6 @@ vuint32m4_t __riscv_vaesdf_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -90,13 +89,13 @@ vuint32m4_t __riscv_vaesdm_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES-128 Forward KeySchedule generation](): +[[overloaded-]] +==== Vector AES-128 Forward KeySchedule generation -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaeskf1 (vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vaeskf1 (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf1 (vuint32m2_t vs2, size_t uimm, size_t vl); @@ -107,12 +106,13 @@ vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_ vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector AES round zero](): +[[overloaded-]] +==== Vector AES round zero -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); @@ -127,5 +127,4 @@ vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md 
b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc similarity index 92% rename from auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md rename to auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc index 2b8a36920..0c818e28d 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -1,10 +1,11 @@ -## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash -### [Vector SHA-2 message schedule](): +[[overloaded-]] +==== Vector SHA-2 message schedule -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ms (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ms (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ms (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -14,12 +15,13 @@ vuint64m1_t __riscv_vsha2ms (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, s vuint64m2_t __riscv_vsha2ms (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2ms (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2ms (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -### [Vector SHA-2 two rounds of compression](): +[[overloaded-]] +==== Vector SHA-2 two rounds of compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ch (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ch (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ch (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -38,4 +40,4 @@ vuint64m1_t __riscv_vsha2cl (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, s vuint64m2_t __riscv_vsha2cl (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2cl (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc similarity index 88% rename from auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md rename to auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc index 8b9eb1b2a..f5ad8d8fa 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -1,21 +1,23 @@ -## Zvksed - ShangMi Suite: SM4 Block Cipher: +=== Zvksed - ShangMi Suite: SM4 Block Cipher -### [Vector SM4 KeyExpansion](): +[[overloaded-]] +==== Vector SM4 KeyExpansion -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4k (vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm4k (vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm4k (vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm4k (vuint32m4_t 
vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector SM4 Rounds](): +[[overloaded-]] +==== Vector SM4 Rounds -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -35,5 +37,4 @@ vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc similarity index 82% rename from auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md rename to auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc index a904879b0..ddf0b441c 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -1,24 +1,26 @@ -## Zvksh - ShangMi Suite: SM3 Secure Hash: +=== Zvksh - ShangMi Suite: SM3 Secure Hash -### [Vector SM3 Message Expansion](): +[[overloaded-]] +==== Vector SM3 Message Expansion -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3me (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsm3me (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsm3me (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); vuint32m4_t __riscv_vsm3me (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); vuint32m8_t __riscv_vsm3me (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -``` +---- -### [Vector SM3 Message Expansion](): +[[overloaded-]] +==== Vector SM3 Compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3c (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm3c (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm3c (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm3c (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm3c (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c index 1bc0fc4a4..43eef93e8 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_ vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); } - 
-vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c index fa4536189..3c1d89651 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_ vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c index d499b8720..1b82fcd8c 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_ vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c index 345b93db5..1db0f1bda 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_ vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c index 97339218d..4bbd0fb10 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c @@ -1,26 +1,22 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return 
__riscv_vaeskf1_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32m2_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32m4_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32m8_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c index 2451093d1..30150c660 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c index 57a9822f3..25486191d 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vaesz_vs_u32mf2_u32mf2_tu(vd, vs2, vl); } @@ -59,8 +56,3 @@ vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c index 6cdb97418..786635b20 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c @@ -1,710 +1,706 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; 
-typedef double float64_t; -vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t 
rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return 
__riscv_vandn_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return 
__riscv_vandn_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return 
__riscv_vandn_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - 
return __riscv_vandn_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, 
vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, 
vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return 
__riscv_vandn_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, 
size_t vl) { - return __riscv_vandn_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return 
__riscv_vandn_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_tumu(mask, maskedoff, vs2, 
vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, 
vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t 
vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, 
vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t 
test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(vm, vd, 
vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c index f4a0371a9..5a16e6adf 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c @@ -1,358 +1,354 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_tu(maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tu(vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_tu(maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tu(vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_tu(maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tu(vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_tu(maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tu(vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_tu(maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tu(vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_tu(maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tu(vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, 
vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_tu(maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_tu(maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_tu(maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_tu(maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_tu(maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_tu(maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_tu(maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_tu(maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t 
test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_tu(maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_tu(maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t 
vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tumu(vm, vd, vs2, vl); } 
-vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t 
test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return 
__riscv_vbrev_v_u16m1_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, 
vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m4_mu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vbrev_v_u64m8_mu(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m8_mu(vm, vd, vs2, vl);
 }
-
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c
index d9a0c3cc2..6186201fc 100644
--- a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c
@@ -1,358 +1,354 @@
 #include
 #include
-typedef _Float16 float16_t;
-typedef float float32_t;
-typedef double float64_t;
-vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8mf8_tu(maskedoff, vs2, vl);
+vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf8_tu(vd, vs2, vl);
 }
-vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8mf4_tu(maskedoff, vs2, vl);
+vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf4_tu(vd, vs2, vl);
 }
-vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8mf2_tu(maskedoff, vs2, vl);
+vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf2_tu(vd, vs2, vl);
 }
-vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8m1_tu(maskedoff, vs2, vl);
+vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m1_tu(vd, vs2, vl);
 }
-vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8m2_tu(maskedoff, vs2, vl);
+vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m2_tu(vd, vs2, vl);
 }
-vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8m4_tu(maskedoff, vs2, vl);
+vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m4_tu(vd, vs2, vl);
 }
-vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8m8_tu(maskedoff, vs2, vl);
+vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m8_tu(vd, vs2, vl);
 }
-vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u16mf4_tu(maskedoff, vs2, vl);
+vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf4_tu(vd, vs2, vl);
 }
-vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u16mf2_tu(maskedoff, vs2, vl);
+vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf2_tu(vd, vs2, vl);
 }
-vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u16m1_tu(maskedoff, vs2, vl);
+vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl)
{ + return __riscv_vbrev8_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_tu(maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_tu(maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_tu(maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_tu(maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_tu(maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_tu(maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_tu(maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_tu(maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tu(vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf8_tum(mask, 
maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +vuint16m4_t 
test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t 
test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t 
test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf8_mu(mask, 
maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, 
vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_mu(vm, vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c index bc3add0ee..22f2b9b4b 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c +++ 
b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c @@ -1,134 +1,130 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t 
mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t 
rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_mu(mask, maskedoff, 
vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c index 7ca88e340..a43662a06 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c @@ -1,134 +1,130 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, 
vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m1_tumu(mask, 
maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(vm, vd, vs2, rs1, 
vl); } -vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c index 48ec0cb4b..731050d9c 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c index 13b28496d..ed035adf4 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { 
return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c index e5a425b5f..ef1976f3e 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c @@ -1,358 +1,354 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf8_tu(maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tu(vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf4_tu(maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tu(vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf2_tu(maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tu(vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m1_tu(maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tu(vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m2_tu(maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tu(vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m4_tu(maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tu(vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_tu(maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_tu(maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return 
__riscv_vrev8_v_u16m2_tu(maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_tu(maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_tu(maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_tu(maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_tu(maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_tu(maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_tu(maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_tu(maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t 
test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) 
{ - return __riscv_vrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf4_tumu(mask, 
maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t 
test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t 
test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_mu(vm, vd, vs2, vl); 
} -vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_mu(vm, vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c index a023644e3..d630a488c 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c @@ -1,710 +1,706 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return 
__riscv_vrol_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return 
__riscv_vrol_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t 
test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + 
return __riscv_vrol_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t 
test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(vm, vd, 
vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vrol_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t 
rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, 
vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); 
+vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vrol_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t 
rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t 
test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); 
+vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); 
+vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t 
test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c index c94ef3774..f62f3eb6e 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c @@ -1,710 +1,706 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t 
test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return 
__riscv_vror_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t 
maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_tu(maskedoff, vs2, 
rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t 
test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); 
+vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_tum(mask, 
maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vror_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, 
size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - 
return __riscv_vror_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t 
mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return 
__riscv_vror_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t 
vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(vm, vd, vs2, vs1, 
vl); } -vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t 
test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t 
test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t 
mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c index 9940b82c2..1d9b85bc0 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_ vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c index 11360869d..468a4d938 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_ vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c index b9e9f83b2..9ee82d425 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl); } @@ -39,4 +36,3 @@ vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_ vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, 
vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c index b0b2246a3..f420557dc 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); } @@ -23,4 +20,3 @@ vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c index 3df7ce142..9b635b0d8 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c @@ -1,26 +1,22 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c index 05dc6da60..270812106 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c @@ -1,26 +1,22 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + 
return __riscv_vsm4k_vi_u32mf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32m2_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32m4_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m4_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32m8_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m8_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c index 996ca813c..4c95663f3 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c @@ -1,9 +1,6 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); } @@ -79,8 +76,3 @@ vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c index ca8992376..56b35568a 100644 --- a/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c @@ -1,486 +1,482 @@ #include #include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t 
test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t 
vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vwsll_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, 
vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_tum(mask, maskedoff, vs2, rs1, 
vl); +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vwsll_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t 
test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t 
vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); 
+vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, 
vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, 
vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 84544a85a..9e568e634 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -1,1038 +1,1045 @@ -## Zvbb - Vector Bit-manipulation used in Cryptography: +=== Zvbb - Vector Bit-manipulation used in Cryptography -### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): +[[policy-variant-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not -**Prototypes:** -``` C -vuint8mf8_t __riscv_vandn_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, 
size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t 
vl); -vuint64m4_t __riscv_vandn_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +[,c] +---- +vuint8mf8_t __riscv_vandn_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t 
__riscv_vandn_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vandn_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); 
-vuint16m1_t __riscv_vandn_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint8mf8_t __riscv_vandn_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t 
rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t 
vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); 
-vuint16m2_t __riscv_vandn_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, 
vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tumu 
(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vandn_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, 
vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` +vuint8mf8_t __riscv_vandn_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); 
+vuint8m1_t __riscv_vandn_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t 
__riscv_vandn_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vandn_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vandn_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vandn_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vandn_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vandn_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+----
-### [Vector Bit-manipulation used in Cryptography - Reverse Bits]():
+[[policy-variant-]]
+==== Vector Basic Bit-manipulation - Reverse Bits in Elements
-**Prototypes:**
-``` C
-vuint8mf8_t __riscv_vbrev_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl);
-vuint8mf4_t __riscv_vbrev_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl);
-vuint8mf2_t __riscv_vbrev_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl);
-vuint8m1_t __riscv_vbrev_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl);
-vuint8m2_t __riscv_vbrev_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl);
-vuint8m4_t __riscv_vbrev_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl);
-vuint8m8_t __riscv_vbrev_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl);
-vuint16mf4_t __riscv_vbrev_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl);
-vuint16mf2_t __riscv_vbrev_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl);
-vuint16m1_t __riscv_vbrev_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl);
-vuint16m2_t __riscv_vbrev_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl);
-vuint16m4_t __riscv_vbrev_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl);
-vuint16m8_t __riscv_vbrev_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl);
-vuint32mf2_t __riscv_vbrev_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl);
-vuint32m1_t __riscv_vbrev_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl);
-vuint32m2_t __riscv_vbrev_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl);
-vuint32m4_t __riscv_vbrev_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl);
-vuint32m8_t __riscv_vbrev_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl);
-vuint64m1_t __riscv_vbrev_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl);
-vuint64m2_t __riscv_vbrev_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl);
-vuint64m4_t __riscv_vbrev_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl);
-vuint64m8_t __riscv_vbrev_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl);
-vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl);
-vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl);
-vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl);
-vuint8m1_t __riscv_vbrev8_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl);
-vuint8m2_t __riscv_vbrev8_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl);
-vuint8m4_t __riscv_vbrev8_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl);
-vuint8m8_t __riscv_vbrev8_v_u8m8_tu
(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t 
__riscv_vbrev_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t 
__riscv_vbrev8_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vbrev_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); 
-vuint32m2_t __riscv_vbrev_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, 
size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vbrev_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t 
__riscv_vrev8_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, 
size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu (vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t 
vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tumu (vbool2_t vm, 
vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vbrev_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t 
__riscv_vbrev8_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_mu 
(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -``` +vuint8mf8_t __riscv_vbrev_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t 
vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t 
__riscv_vrev8_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vrev8_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+----
-### [Vector Bit-manipulation used in Cryptography - Count Bits]():
-This operation don't have Policy Intrinsic Functions.
+[[policy-variant-]]
+==== Vector Basic Bit-manipulation - Count Bits
+Intrinsics here don't have a policy variant.
-### [Vector Bit-manipulation used in Cryptography - Rotate]():
+[[policy-variant-]]
+==== Vector Bit-manipulation used in Cryptography - Rotate
-**Prototypes:**
-``` C
-vuint8mf8_t __riscv_vrol_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl);
-vuint8mf8_t __riscv_vrol_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl);
-vuint8mf4_t __riscv_vrol_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl);
-vuint8mf4_t __riscv_vrol_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl);
-vuint8mf2_t __riscv_vrol_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl);
-vuint8mf2_t __riscv_vrol_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl);
-vuint8m1_t __riscv_vrol_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl);
-vuint8m1_t __riscv_vrol_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl);
-vuint8m2_t __riscv_vrol_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl);
-vuint8m2_t __riscv_vrol_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl);
-vuint8m4_t __riscv_vrol_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl);
-vuint8m4_t __riscv_vrol_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl);
-vuint8m8_t __riscv_vrol_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl);
-vuint8m8_t __riscv_vrol_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl);
-vuint16mf4_t __riscv_vrol_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl);
-vuint16mf4_t __riscv_vrol_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl);
-vuint16mf2_t __riscv_vrol_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl);
-vuint16mf2_t __riscv_vrol_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl);
-vuint16m1_t __riscv_vrol_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl);
-vuint16m1_t __riscv_vrol_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl);
-vuint16m2_t __riscv_vrol_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl);
-vuint16m2_t __riscv_vrol_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl);
-vuint16m4_t __riscv_vrol_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl);
-vuint16m4_t __riscv_vrol_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl);
-vuint16m8_t __riscv_vrol_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl);
-vuint16m8_t __riscv_vrol_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl);
-vuint32mf2_t __riscv_vrol_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
-vuint32mf2_t
__riscv_vrol_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); 
-vuint16mf2_t __riscv_vror_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t 
vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); 
+vuint64m8_t __riscv_vrol_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t 
__riscv_vror_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vrol_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t 
vl); -vuint16m4_t __riscv_vrol_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); 
-vuint8m1_t __riscv_vror_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t 
__riscv_vror_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vrol_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); 
+vuint16m4_t __riscv_vrol_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tum (vbool2_t vm, 
vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, 
size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, 
vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, 
size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_tumu (vbool16_t mask, 
vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu (vbool64_t 
vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t 
vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vrol_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_mu (vbool64_t mask, 
vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, 
size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_mu (vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -``` +vuint8mf8_t __riscv_vrol_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t 
vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_mu (vbool8_t vm, 
vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_mu 
(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +---- -### [Vector Bit-manipulation used in Cryptography - Shift](): +[[policy-variant-]] +==== Vector Basic Bit-manipulation used - Widening Shift -**Prototypes:** -``` C -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, 
size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t vd, 
vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t 
maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t 
__riscv_vwsll_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, 
vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t 
__riscv_vwsll_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, 
size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -``` +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, 
vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +---- -## Zvbc - Vector Carryless Multiplication: +=== Zvbc - Vector Carryless Multiplication -### [Vector Carryless Multiplication](): +[[policy-variant-]] +==== Vector Carryless Multiplication -**Prototypes:** -``` C -vuint64m1_t __riscv_vclmul_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_tu 
(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint64m1_t __riscv_vclmul_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); 
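
// Editorial sketch, not part of the generated listing: a minimal use of the
// tail-undisturbed (_tu) Zvbc carryless-multiply intrinsics prototyped above.
// Assumes <riscv_vector.h> and a toolchain with the Zvbc extension enabled
// (e.g. -march=rv64gcv_zvbc); the helper name and data layout are illustrative.
#include <riscv_vector.h>
#include <stdint.h>

// Computes the 128-bit carryless products a[i] x b[i] for the first vl
// elements. The _tu variants take the destination as vd, so elements at
// indices >= vl keep whatever vlo/vhi already held.
static inline void clmul128_tu(uint64_t *lo, uint64_t *hi,
                               const uint64_t *a, const uint64_t *b,
                               size_t vl) {
  vuint64m1_t va  = __riscv_vle64_v_u64m1(a, vl);
  vuint64m1_t vb  = __riscv_vle64_v_u64m1(b, vl);
  vuint64m1_t vlo = __riscv_vle64_v_u64m1(lo, vl);      // destination operand (vd)
  vuint64m1_t vhi = __riscv_vle64_v_u64m1(hi, vl);
  vlo = __riscv_vclmul_vv_u64m1_tu(vlo, va, vb, vl);    // low 64 bits of each product
  vhi = __riscv_vclmulh_vv_u64m1_tu(vhi, va, vb, vl);   // high 64 bits of each product
  __riscv_vse64_v_u64m1(lo, vlo, vl);
  __riscv_vse64_v_u64m1(hi, vhi, vl);
}
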
-vuint64m2_t __riscv_vclmulh_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmul_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint64m1_t __riscv_vclmul_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t 
__riscv_vclmulh_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmul_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint64m1_t __riscv_vclmul_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t 
__riscv_vclmul_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` +vuint64m1_t __riscv_vclmul_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- -## Zvkg - Vector GCM/GMAC: +=== Zvkg - Vector GCM/GMAC -### [Vector GCM/GMAC](): +[[policy-variant-]] +==== Vector GCM/GMAC -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vghsh_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vghsh_vv_u32m1_tu (vuint32m1_t vd, 
vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vghsh_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -1043,14 +1050,15 @@ vuint32m1_t __riscv_vgmul_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t v vuint32m2_t __riscv_vgmul_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvkned - NIST Suite: Vector AES Block Cipher: +=== Zvkned - NIST Suite: Vector AES Block Cipher -### [Vector AES Encryption](): +[[policy-variant-]] +==== Vector AES Encryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1070,7 +1078,6 @@ vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1090,13 +1097,13 @@ vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES Decryption](): +[[policy-variant-]] +==== Vector AES Decryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1116,7 +1123,6 @@ vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1136,29 +1142,30 @@ vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); 
vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES-128 Forward KeySchedule generation](): +[[policy-variant-]] +==== Vector AES-128 Forward KeySchedule generation -**Prototypes:** -``` C -vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector AES round zero](): +[[policy-variant-]] +==== Vector AES round zero -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); @@ -1173,15 +1180,15 @@ vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, si vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash -### [Vector SHA-2 message schedule](): +[[policy-variant-]] +==== Vector SHA-2 message schedule -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ms_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ms_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ms_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -1191,12 +1198,13 @@ vuint64m1_t __riscv_vsha2ms_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint6 vuint64m2_t 
__riscv_vsha2ms_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2ms_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2ms_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -### [Vector SHA-2 two rounds of compression](): +[[policy-variant-]] +==== Vector SHA-2 two rounds of compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ch_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ch_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ch_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -1215,25 +1223,27 @@ vuint64m1_t __riscv_vsha2cl_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint6 vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -## Zvksed - ShangMi Suite: SM4 Block Cipher: +=== Zvksed - ShangMi Suite: SM4 Block Cipher -### [Vector SM4 KeyExpansion](): +[[policy-variant-]] +==== Vector SM4 KeyExpansion -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vsm4k_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vsm4k_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vsm4k_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- -### [Vector SM4 Rounds](): +[[policy-variant-]] +==== Vector SM4 Rounds -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1253,29 +1263,30 @@ vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t v vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvksh - ShangMi Suite: SM3 Secure Hash: +=== Zvksh - ShangMi Suite: SM3 Secure Hash -### [Vector SM3 Message Expansion](): +[[policy-variant-]] +==== Vector SM3 Message Expansion -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu 
(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
-vuint32m1_t __riscv_vsm3me_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
-vuint32m2_t __riscv_vsm3me_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
-vuint32m4_t __riscv_vsm3me_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
-vuint32m8_t __riscv_vsm3me_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
-```
+[,c]
+----
+vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsm3me_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsm3me_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsm3me_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsm3me_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+----
-### [Vector SM3 Message Expansion]():
+[[policy-variant-]]
+==== Vector SM3 Compression
-**Prototypes:**
-``` C
+[,c]
+----
 vuint32mf2_t __riscv_vsm3c_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl);
 vuint32m1_t __riscv_vsm3c_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl);
 vuint32m2_t __riscv_vsm3c_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl);
 vuint32m4_t __riscv_vsm3c_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl);
 vuint32m8_t __riscv_vsm3c_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl);
-```
+----
diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
new file mode 100644
index 000000000..b261c089c
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
@@ -0,0 +1,958 @@
+
+=== Zvbb - Vector Bit-manipulation used in Cryptography
+
+[[policy-variant-]]
+==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not
+
+[,c]
+----
+vuint8mf8_t __riscv_vandn_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl);
+vuint8mf8_t __riscv_vandn_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl);
+vuint8mf4_t __riscv_vandn_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl);
+vuint8mf4_t __riscv_vandn_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl);
+vuint8mf2_t __riscv_vandn_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl);
+vuint8mf2_t __riscv_vandn_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl);
+vuint8m1_t __riscv_vandn_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl);
+vuint8m1_t __riscv_vandn_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl);
+vuint8m2_t __riscv_vandn_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl);
+vuint8m2_t __riscv_vandn_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl);
+vuint8m4_t __riscv_vandn_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl);
+vuint8m4_t __riscv_vandn_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl);
+vuint8m8_t __riscv_vandn_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t
vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t 
__riscv_vandn_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t 
__riscv_vandn_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, 
vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t 
__riscv_vandn_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_mu 
(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Reverse Bits in Elements + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t 
__riscv_vbrev8_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, 
size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tum (vbool8_t vm, vuint64m8_t 
vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu 
(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t 
vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t 
vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t 
__riscv_vrev8_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vrev8_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vrev8_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vrev8_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vrev8_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vrev8_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vrev8_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vrev8_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vrev8_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vrev8_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+----
+
+[[policy-variant-]]
+==== Vector Basic Bit-manipulation - Count Bits
+Intrinsics here don't have a policy variant.
+
+[[policy-variant-]]
+==== Vector Bit-manipulation used in Cryptography - Rotate
+
+[,c]
+----
+vuint8mf8_t __riscv_vrol_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl);
+vuint8mf8_t __riscv_vrol_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl);
+vuint8mf4_t __riscv_vrol_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl);
+vuint8mf4_t __riscv_vrol_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl);
+vuint8mf2_t __riscv_vrol_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl);
+vuint8mf2_t __riscv_vrol_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl);
+vuint8m1_t __riscv_vrol_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl);
+vuint8m1_t __riscv_vrol_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl);
+vuint8m2_t __riscv_vrol_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl);
+vuint8m2_t __riscv_vrol_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl);
+vuint8m4_t __riscv_vrol_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl);
+vuint8m4_t __riscv_vrol_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl);
+vuint8m8_t __riscv_vrol_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl);
+vuint8m8_t __riscv_vrol_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl);
+vuint16mf4_t __riscv_vrol_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl);
+vuint16mf4_t __riscv_vrol_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl);
+vuint16mf2_t __riscv_vrol_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl);
+vuint16mf2_t __riscv_vrol_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl);
+vuint16m1_t __riscv_vrol_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl);
+vuint16m1_t __riscv_vrol_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl);
+vuint16m2_t __riscv_vrol_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl);
+vuint16m2_t __riscv_vrol_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl);
+vuint16m4_t __riscv_vrol_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl);
+vuint16m4_t
__riscv_vrol_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tu 
(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t 
__riscv_vrol_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, 
vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t 
__riscv_vror_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tumu (vbool1_t 
vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tumu 
(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, 
vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); 
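// Editor's illustrative sketch -- not part of the generated listing above.
// Shows how one of the mask-undisturbed (_mu) rotate intrinsics listed here
// might be called. Assumes a toolchain whose <riscv_vector.h> exposes the
// Zvbb intrinsics; the helper name is hypothetical. Active elements receive
// vs2 rotated left by the scalar shift amount, while elements whose mask bit
// is clear keep the value already held in vd.
#include <riscv_vector.h>

static inline vuint32m1_t rol_masked_keep_inactive(vbool32_t vm, vuint32m1_t vd,
                                                   vuint32m1_t vs2, size_t vl) {
  // Rotate the active elements left by 7 bits; inactive elements stay as in vd.
  return __riscv_vrol_vx_u32m1_mu(vm, vd, vs2, 7, vl);
}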
+vuint16m1_t __riscv_vrol_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t 
vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_mu 
(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); 
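// Editor's illustrative sketch -- not part of the generated listing above.
// Shows a tail-undisturbed (_tu) widening shift-left from this section.
// Assumes the same <riscv_vector.h> / Zvbb-enabled toolchain; the helper name
// is hypothetical. Each active element becomes (uint16_t)vs2[i] << 3, and
// elements past vl keep the values already present in vd.
#include <riscv_vector.h>

static inline vuint16m1_t wsll_by_3_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
  // Widen each u8 element of vs2 to u16 and shift it left by 3.
  return __riscv_vwsll_vx_u16m1_tu(vd, vs2, 3, vl);
}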
+vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t 
__riscv_vwsll_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, 
vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, 
vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md deleted file mode 100644 index 0031d9a2d..000000000 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ /dev/null @@ -1,953 +0,0 @@ - -## Zvbb - Vector Bit-manipulation used in Cryptography: - -### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vandn_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); 
-vuint32mf2_t __riscv_vandn_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vandn_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); 
-vuint16mf4_t __riscv_vandn_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_tum (vbool8_t 
mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, 
vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vandn_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_vx_u8m4_mu (vbool2_t 
mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t 
__riscv_vandn_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vbrev_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_tu (vuint16m2_t maskedoff, 
vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vbrev_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_tum (vbool4_t mask, vuint8m2_t 
maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_tum (vbool16_t mask, 
vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_tumu 
(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); 
-vuint32m2_t __riscv_vbrev8_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vbrev_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_v_u8m1_mu (vbool8_t mask, 
vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, 
vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_v_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_v_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_v_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_v_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_v_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_v_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_v_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_v_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_v_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_v_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_v_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_v_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_v_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_v_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_v_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_v_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_v_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_v_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_v_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_v_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_v_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Count Bits](): -This operation does not have Policy Intrinsic Functions.
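As a minimal usage sketch (not taken from the patch itself): the tail-undisturbed `_tu` variants removed above compose as shown below, assuming a toolchain and target where the Zvbb vector bit-manipulation extension is enabled and using only prototypes that appear in the listings; the function name `brev_then_andn_tu` is a hypothetical helper for illustration.

``` C
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Sketch only (hypothetical helper): bit-reverse the first vl elements of v,
// then clear the bits given in clear_bits (vandn computes vs2 & ~rs1).
// With the _tu (tail-undisturbed) policy, elements past vl in the result are
// taken from `prev` rather than being left unspecified.
vuint32m1_t brev_then_andn_tu(vuint32m1_t prev, vuint32m1_t v,
                              uint32_t clear_bits, size_t vl) {
  vuint32m1_t r = __riscv_vbrev_v_u32m1_tu(prev, v, vl);
  return __riscv_vandn_vx_u32m1_tu(prev, r, clear_bits, vl);
}
```

The `_tum`, `_tumu`, and `_mu` variants follow the same calling pattern but additionally take a leading `vboolN_t` mask, as the prototypes above show; they differ only in how tail and masked-off elements are treated.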
- -### [Vector Bit-manipulation used in Cryptography - Rotate](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vrol_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, 
size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, 
vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vrol_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, 
vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, 
vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, 
size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, 
size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t 
maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vrol_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_vv_u8m2_mu (vbool4_t 
mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t 
vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_vv_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_vx_u8mf8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_vv_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_vx_u8mf4_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_vv_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_vx_u8mf2_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_vv_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_vx_u8m1_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_vv_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_vx_u8m2_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_vv_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_vx_u8m4_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_vv_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_vx_u8m8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t 
__riscv_vror_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Shift](): - -**Prototypes:** -``` C -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t 
maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -// masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t 
vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -// masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t 
mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -// masked functions -vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, 
vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..559ba54e5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,76 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t 
vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tumu (vbool64_t vm, 
vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, 
uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md deleted file mode 100644 index 7e7effc48..000000000 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md +++ /dev/null @@ -1,75 +0,0 @@ - -## Zvbc - Vector Carryless Multiplication: - -### [Vector Carryless Multiplication](): - -**Prototypes:** -``` C -vuint64m1_t __riscv_vclmul_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint64m1_t __riscv_vclmul_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, 
uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint64m1_t __riscv_vclmul_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint64m1_t __riscv_vclmul_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_vx_u64m4_mu (vbool16_t mask, vuint64m4_t 
maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vv_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_vx_u64m1_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vv_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_vx_u64m2_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vv_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc similarity index 91% rename from auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md rename to auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc index 0cd0c65e3..cf2c6a401 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -1,10 +1,11 @@ -## Zvkg - Vector GCM/GMAC: +=== Zvkg - Vector GCM/GMAC -### [Vector GCM/GMAC](): +[[policy-variant-]] +==== Vector GCM/GMAC -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vghsh_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vghsh_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vghsh_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -15,4 +16,4 @@ vuint32m1_t __riscv_vgmul_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t v vuint32m2_t __riscv_vgmul_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc similarity index 87% rename from auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md rename to auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc index 978cdee59..29d2463a1 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc 
@@ -1,10 +1,11 @@ -## Zvkned - NIST Suite: Vector AES Block Cipher: +=== Zvkned - NIST Suite: Vector AES Block Cipher -### [Vector AES Encryption](): +[[policy-variant-]] +==== Vector AES Encryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -24,7 +25,6 @@ vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -44,13 +44,13 @@ vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES Decryption](): +[[policy-variant-]] +==== Vector AES Decryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -70,7 +70,6 @@ vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -90,29 +89,30 @@ vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES-128 Forward KeySchedule generation](): +[[policy-variant-]] +==== Vector AES-128 Forward KeySchedule generation -**Prototypes:** -``` C -vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t 
vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector AES round zero](): +[[policy-variant-]] +==== Vector AES round zero -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); @@ -127,5 +127,4 @@ vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, si vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc similarity index 93% rename from auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md rename to auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc index c6a2a611f..2aec4fd51 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -1,10 +1,11 @@ -## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash -### [Vector SHA-2 message schedule](): +[[policy-variant-]] +==== Vector SHA-2 message schedule -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ms_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ms_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, 
vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ms_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -14,12 +15,13 @@ vuint64m1_t __riscv_vsha2ms_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint6 vuint64m2_t __riscv_vsha2ms_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2ms_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2ms_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -### [Vector SHA-2 two rounds of compression](): +[[policy-variant-]] +==== Vector SHA-2 two rounds of compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ch_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ch_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ch_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -38,4 +40,4 @@ vuint64m1_t __riscv_vsha2cl_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint6 vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc similarity index 68% rename from auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md rename to auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc index 49419391a..95d0f470f 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -1,21 +1,23 @@ -## Zvksed - ShangMi Suite: SM4 Block Cipher: +=== Zvksed - ShangMi Suite: SM4 Block Cipher -### [Vector SM4 KeyExpansion](): +[[policy-variant-]] +==== Vector SM4 KeyExpansion -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vsm4k_vi_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vsm4k_vi_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vsm4k_vi_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- -### [Vector SM4 Rounds](): +[[policy-variant-]] +==== Vector SM4 Rounds -**Prototypes:** -``` C +[,c] +---- 
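/*
 * (Editorial sketch, not part of the auto-generated listing.) A minimal
 * example of calling one of the tail-undisturbed ("_tu") policy variants
 * listed below, assuming <riscv_vector.h> declares the vector crypto
 * intrinsics. Under the _tu policy the first operand, vd, both receives the
 * result and supplies the values kept in the tail elements, consistent with
 * this patch's renaming of that parameter from "maskedoff" to "vd".
 */
#include <riscv_vector.h>

static vuint32mf2_t apply_vsm4r_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
                                    size_t vl) {
  /* Operand roles follow the prototype shown directly below this sketch. */
  return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl);
}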
vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -35,5 +37,4 @@ vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t v vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_u32m8_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..589216717 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,26 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[policy-variant-]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md deleted file mode 100644 index afc57afff..000000000 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md +++ /dev/null @@ -1,24 +0,0 @@ - -## Zvksh - ShangMi Suite: SM3 Secure Hash: - -### [Vector SM3 Message Expansion](): - -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vsm3me_vv_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m2_t __riscv_vsm3me_vv_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m4_t __riscv_vsm3me_vv_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m8_t __riscv_vsm3me_vv_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -``` - -### [Vector SM3 Message Expansion](): - -**Prototypes:** -``` C -vuint32mf2_t 
__riscv_vsm3c_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vsm3c_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vsm3c_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vsm3c_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vsm3c_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c index 7bf1af06b..990433721 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -87,8 +87,3 @@ vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_ vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c index 856cc6350..80a243721 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -87,8 +87,3 @@ vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_ vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return 
__riscv_vaesdm_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c index a7ee09719..224ac4953 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -87,8 +87,3 @@ vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_ vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c index 3b398d5cc..fa0a10105 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -87,8 +87,3 @@ vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_ vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c index 3a70a8170..cc4667e80 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c @@ -1,34 +1,33 @@ // REQUIRES: riscv-registered-target // RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32m2_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32m4_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf1_vi_u32m8_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c index 583d8cf43..7f05b473c 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -31,4 +31,3 @@ vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) vuint32m8_t 
test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c index d5e3ba40f..f50cae600 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -67,8 +67,3 @@ vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c index e9f99c85f..8e79acfdd 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c @@ -1,718 +1,717 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t 
maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t 
test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t 
test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t 
test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return 
__riscv_vandn_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return 
__riscv_vandn_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t 
test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t 
rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t 
test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - 
return __riscv_vandn_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t 
vl) { + return __riscv_vandn_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_tumu(mask, maskedoff, vs2, 
vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t 
vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return 
__riscv_vandn_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, 
vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t 
test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_mu(vm, vd, 
vs2, rs1, vl);
 }

-vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
-  return __riscv_vandn_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vandn_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
-
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c
index fb68a2faf..1faa2260e 100644
--- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c
@@ -1,366 +1,365 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN:   -target-feature +experimental-zvbb \
-// RUN:   -target-feature +experimental-zvbc \
-// RUN:   -target-feature +experimental-zvkg \
-// RUN:   -target-feature +experimental-zvkned \
-// RUN:   -target-feature +experimental-zvknhb \
-// RUN:   -target-feature +experimental-zvksed \
-// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN:   -target-feature +zvbb \
+// RUN:   -target-feature +zvbc \
+// RUN:   -target-feature +zvkg \
+// RUN:   -target-feature +zvkned \
+// RUN:   -target-feature +zvknhb \
+// RUN:   -target-feature +zvksed \
+// RUN:   -target-feature +zvksh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vbrev_v_u8mf8_tu(maskedoff, vs2, vl);
+vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf8_tu(vd, vs2, vl);
 }

-vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vbrev_v_u8mf4_tu(maskedoff, vs2, vl);
+vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf4_tu(vd, vs2, vl);
 }

-vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vbrev_v_u8mf2_tu(maskedoff, vs2, vl);
+vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf2_tu(vd, vs2, vl);
 }

-vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vbrev_v_u8m1_tu(maskedoff, vs2, vl);
+vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m1_tu(vd, vs2, vl);
 }

-vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vbrev_v_u8m2_tu(maskedoff, vs2, vl);
+vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m2_tu(vd, vs2, vl);
 }

-vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vbrev_v_u8m4_tu(maskedoff, vs2, vl);
+vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m4_tu(vd, vs2, vl);
 }

-vuint8m8_t
test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_tu(maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_tu(maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_tu(maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_tu(maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_tu(maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_tu(maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_tu(maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_tu(maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_tu(maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_tu(maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t 
test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, 
vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf4_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8mf2_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m1_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m2_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m4_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u8m8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf4_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16mf2_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t 
maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m1_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m2_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m4_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u16m8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32mf2_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m1_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m2_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m4_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_v_u32m8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m1_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m2_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_v_u64m4_mu(mask, maskedoff, vs2, 
vl);
+vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m4_mu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vbrev_v_u64m8_mu(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m8_mu(vm, vd, vs2, vl);
 }
-
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c
index ad555f360..737992ff9 100644
--- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c
@@ -1,366 +1,365 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN:   -target-feature +experimental-zvbb \
-// RUN:   -target-feature +experimental-zvbc \
-// RUN:   -target-feature +experimental-zvkg \
-// RUN:   -target-feature +experimental-zvkned \
-// RUN:   -target-feature +experimental-zvknhb \
-// RUN:   -target-feature +experimental-zvksed \
-// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN:   -target-feature +zvbb \
+// RUN:   -target-feature +zvbc \
+// RUN:   -target-feature +zvkg \
+// RUN:   -target-feature +zvkned \
+// RUN:   -target-feature +zvknhb \
+// RUN:   -target-feature +zvksed \
+// RUN:   -target-feature +zvksh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8mf8_tu(maskedoff, vs2, vl);
+vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf8_tu(vd, vs2, vl);
 }

-vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8mf4_tu(maskedoff, vs2, vl);
+vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf4_tu(vd, vs2, vl);
 }

-vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8mf2_tu(maskedoff, vs2, vl);
+vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf2_tu(vd, vs2, vl);
 }

-vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8m1_tu(maskedoff, vs2, vl);
+vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m1_tu(vd, vs2, vl);
 }

-vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8m2_tu(maskedoff, vs2, vl);
+vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m2_tu(vd, vs2, vl);
 }

-vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8m4_tu(maskedoff, vs2, vl);
+vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m4_tu(vd, vs2, vl);
 }

-vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u8m8_tu(maskedoff, vs2, vl);
+vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m8_tu(vd, vs2, vl);
} -vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_tu(maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_tu(maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_tu(maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_tu(maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_tu(maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_tu(maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_tu(maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_tu(maskedoff, vs2, vl); +vuint64m2_t 
test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_tu(maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tu(vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(vm, vd, vs2, vl); } 
-vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t 
test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t 
test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return 
__riscv_vbrev8_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t 
test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m4_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_v_u32m8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m1_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_v_u64m2_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t 
maskedoff, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u64m4_mu(mask, maskedoff, vs2, vl);
+vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m4_mu(vm, vd, vs2, vl);
 }
 
-vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vbrev8_v_u64m8_mu(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m8_mu(vm, vd, vs2, vl);
 }
-
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c
index 0f6ab5547..c776dacad 100644
--- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c
@@ -1,142 +1,141 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN: -target-feature +experimental-zvbb \
-// RUN: -target-feature +experimental-zvbc \
-// RUN: -target-feature +experimental-zvkg \
-// RUN: -target-feature +experimental-zvkned \
-// RUN: -target-feature +experimental-zvknhb \
-// RUN: -target-feature +experimental-zvksed \
-// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN: -target-feature +zvbb \
+// RUN: -target-feature +zvbc \
+// RUN: -target-feature +zvkg \
+// RUN: -target-feature +zvkned \
+// RUN: -target-feature +zvknhb \
+// RUN: -target-feature +zvksed \
+// RUN: -target-feature +zvksh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
-  return __riscv_vclmul_vv_u64m1_tu(maskedoff, vs2, vs1, vl);
+vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vclmul_vx_u64m1_tu(maskedoff, vs2, rs1, vl);
+vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_tu(vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
-  return __riscv_vclmul_vv_u64m2_tu(maskedoff, vs2, vs1, vl);
+vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_tu(vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vclmul_vx_u64m2_tu(maskedoff, vs2, rs1, vl);
+vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_tu(vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
-  return __riscv_vclmul_vv_u64m4_tu(maskedoff, vs2, vs1, vl);
+vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_tu(vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vclmul_vx_u64m4_tu(maskedoff, vs2, rs1, vl);
+vuint64m4_t
test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); 
+vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t 
mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c index 0c9384bef..94df486ca 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c @@ -1,142 +1,141 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh 
-disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
-  return __riscv_vclmulh_vv_u64m1_tu(maskedoff, vs2, vs1, vl);
+vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vclmulh_vx_u64m1_tu(maskedoff, vs2, rs1, vl);
+vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tu(vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
-  return __riscv_vclmulh_vv_u64m2_tu(maskedoff, vs2, vs1, vl);
+vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_tu(vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vclmulh_vx_u64m2_tu(maskedoff, vs2, rs1, vl);
+vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_tu(vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
-  return __riscv_vclmulh_vv_u64m4_tu(maskedoff, vs2, vs1, vl);
+vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_tu(vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vclmulh_vx_u64m4_tu(maskedoff, vs2, rs1, vl);
+vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_tu(vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
-  return __riscv_vclmulh_vv_u64m8_tu(maskedoff, vs2, vs1, vl);
+vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_tu(vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vclmulh_vx_u64m8_tu(maskedoff, vs2, rs1, vl);
+vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_tu(vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
-  return __riscv_vclmulh_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl);
+vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
-  return __riscv_vclmulh_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl);
+vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask,
vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t 
vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, 
size_t vl) { - return __riscv_vclmulh_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c index a503594d6..e3f7395a9 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -31,4 +31,3 @@ vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c index aec4008fb..e4920e5d1 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s

@@ -31,4 +31,3 @@ vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
 vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
   return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl);
 }
-
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c
index 552b08f8e..61471ea81 100644
--- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c
@@ -1,366 +1,365 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN: -target-feature +experimental-zvbb \
-// RUN: -target-feature +experimental-zvbc \
-// RUN: -target-feature +experimental-zvkg \
-// RUN: -target-feature +experimental-zvkned \
-// RUN: -target-feature +experimental-zvknhb \
-// RUN: -target-feature +experimental-zvksed \
-// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN: -target-feature +zvbb \
+// RUN: -target-feature +zvbc \
+// RUN: -target-feature +zvkg \
+// RUN: -target-feature +zvkned \
+// RUN: -target-feature +zvknhb \
+// RUN: -target-feature +zvksed \
+// RUN: -target-feature +zvksh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8mf8_tu(maskedoff, vs2, vl);
+vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf8_tu(vd, vs2, vl);
 }

-vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8mf4_tu(maskedoff, vs2, vl);
+vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf4_tu(vd, vs2, vl);
 }

-vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8mf2_tu(maskedoff, vs2, vl);
+vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf2_tu(vd, vs2, vl);
 }

-vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8m1_tu(maskedoff, vs2, vl);
+vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m1_tu(vd, vs2, vl);
 }

-vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8m2_tu(maskedoff, vs2, vl);
+vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m2_tu(vd, vs2, vl);
 }

-vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8m4_tu(maskedoff, vs2, vl);
+vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m4_tu(vd, vs2, vl);
 }

-vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u8m8_tu(maskedoff, vs2, vl);
+vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m8_tu(vd, vs2, vl);
 }

-vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vrev8_v_u16mf4_tu(maskedoff, vs2, vl);
+vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+
return __riscv_vrev8_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_tu(maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_tu(maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_tu(maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_tu(maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_tu(maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_tu(maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_tu(maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_tu(maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_tu(maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, 
vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t 
mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) 
{ - return __riscv_vrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_tumu(mask, 
maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_tumu(mask, maskedoff, 
vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m4_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u32m8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m1_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m2_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m4_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_v_u64m8_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_mu(vm, vd, vs2, vl); } - diff --git 
a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c
index cea862e0f..0dacd5b3e 100644
--- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c
@@ -1,718 +1,717 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN: -target-feature +experimental-zvbb \
-// RUN: -target-feature +experimental-zvbc \
-// RUN: -target-feature +experimental-zvkg \
-// RUN: -target-feature +experimental-zvkned \
-// RUN: -target-feature +experimental-zvknhb \
-// RUN: -target-feature +experimental-zvksed \
-// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN: -target-feature +zvbb \
+// RUN: -target-feature +zvbc \
+// RUN: -target-feature +zvkg \
+// RUN: -target-feature +zvkned \
+// RUN: -target-feature +zvknhb \
+// RUN: -target-feature +zvksed \
+// RUN: -target-feature +zvksh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 #include <riscv_vector.h>
-vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
-  return __riscv_vrol_vv_u8mf8_tu(maskedoff, vs2, vs1, vl);
+vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
-  return __riscv_vrol_vx_u8mf8_tu(maskedoff, vs2, rs1, vl);
+vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
-  return __riscv_vrol_vv_u8mf4_tu(maskedoff, vs2, vs1, vl);
+vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf4_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) {
-  return __riscv_vrol_vx_u8mf4_tu(maskedoff, vs2, rs1, vl);
+vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
-  return __riscv_vrol_vv_u8mf2_tu(maskedoff, vs2, vs1, vl);
+vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf2_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) {
-  return __riscv_vrol_vx_u8mf2_tu(maskedoff, vs2, rs1, vl);
+vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8mf2_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
-  return __riscv_vrol_vv_u8m1_tu(maskedoff, vs2, vs1, vl);
+vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m1_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) {
-  return __riscv_vrol_vx_u8m1_tu(maskedoff, vs2, rs1, vl);
+vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd,
vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t 
test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - 
return __riscv_vrol_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, 
vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } 
-vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t 
test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(vm, vd, vs2, 
rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(vm, 
vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(vm, vd, vs2, rs1, 
vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, 
vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, 
vl); +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vrol_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); 
+vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t 
test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t 
test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t 
vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
-  return __riscv_vrol_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl);
+vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
-  return __riscv_vrol_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
-  return __riscv_vrol_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) {
-  return __riscv_vrol_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
-
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c
index 62ced63e1..c28fb02ee 100644
--- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c
@@ -1,718 +1,717 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN: -target-feature +experimental-zvbb \
-// RUN: -target-feature +experimental-zvbc \
-// RUN: -target-feature +experimental-zvkg \
-// RUN: -target-feature +experimental-zvkned \
-// RUN: -target-feature +experimental-zvknhb \
-// RUN: -target-feature +experimental-zvksed \
-// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN: -target-feature +zvbb \
+// RUN: -target-feature +zvbc \
+// RUN: -target-feature +zvkg \
+// RUN: -target-feature +zvkned \
+// RUN: -target-feature +zvknhb \
+// RUN: -target-feature +zvksed \
+// RUN: -target-feature +zvksh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 #include <riscv_vector.h>
-vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
-  return __riscv_vror_vv_u8mf8_tu(maskedoff, vs2, vs1, vl);
+vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
-  return __riscv_vror_vx_u8mf8_tu(maskedoff, vs2, rs1, vl);
+vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
-  return __riscv_vror_vv_u8mf4_tu(maskedoff, vs2, vs1, vl);
+vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd,
vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, 
size_t vl) { - return __riscv_vror_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_tu(maskedoff, vs2, rs1, vl); 
+vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, 
size_t vl) { + return __riscv_vror_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t 
test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vror_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t 
test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_tum(mask, 
maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vror_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - 
return __riscv_vror_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, 
vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } 
-vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return 
__riscv_vror_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t 
vl) { + return __riscv_vror_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t 
mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, 
vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, 
vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c index 7b0921a22..97c413c75 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature 
+v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -47,4 +47,3 @@ vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_ vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c index 8920a97a6..8f43c4416 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -47,4 +47,3 @@ vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_ vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c index 9e7df01ff..bb48799a5 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | 
\ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -47,4 +47,3 @@ vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_ vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c index 0cda20a97..ccf8caa8b 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -31,4 +31,3 @@ vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c index a3687efb9..3ebf605aa 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c @@ -1,34 +1,33 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(vd, vs2, vs1, vl); } 
-vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vsm3me_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c index 9c03f0061..8f353c311 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c @@ -1,34 +1,33 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32mf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32m2_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32m4_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m4_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4k_vi_u32m8_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return 
__riscv_vsm4k_vi_u32m8_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c index 749e2f687..06f9b3ffc 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c @@ -1,12 +1,12 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -87,8 +87,3 @@ vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); } - -vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_u32m8_u32m8_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c index 390decdcb..63da91ed1 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c @@ -1,494 +1,493 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return 
__riscv_vwsll_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); 
+vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t 
vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t 
test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return 
__riscv_vwsll_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, 
vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return 
__riscv_vwsll_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t 
test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16mf2_mu(mask, maskedoff, vs2, 
rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32mf2_mu(mask, 
maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vwsll_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c index 33cff9e27..6d2504f27 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c @@ -1,94 +1,108 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + 
size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c index eb5e53de7..1fa488b00 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c @@ -1,94 +1,108 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t 
test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c index 00327588f..5635721bb 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c @@ -1,94 +1,108 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: 
-target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t 
vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c index fc86d2d5c..a7f05f2b8 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c @@ -1,94 +1,108 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t 
test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c 
b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c index 8ffb3eaf4..c3f94c976 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c @@ -1,34 +1,38 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c index 3da580d32..2df41ac05 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c @@ -1,34 +1,38 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature 
+zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c index 352ea15d6..877402aee 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c @@ -1,74 +1,83 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return 
__riscv_vaesz_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesz_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c index de8cd112d..af084405c 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c @@ -1,718 +1,950 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, 
vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t 
test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, 
vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + 
return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t 
test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); 
+vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t 
vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t 
vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t 
test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, 
size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return 
__riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, 
maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t 
test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + 
return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - 
return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, 
vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } 
-vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
- return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
+vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+ vuint64m2_t vs2, vuint64m2_t vs1,
+ size_t vl) {
+ return __riscv_vandn_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
- return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
+vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+ vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vandn_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
- return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
+vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+ vuint64m4_t vs2, vuint64m4_t vs1,
+ size_t vl) {
+ return __riscv_vandn_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
- return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
+vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+ vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vandn_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
- return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
+vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2,
+ vuint64m8_t vs1, size_t vl) {
+ return __riscv_vandn_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
- return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
+vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2,
+ uint64_t rs1, size_t vl) {
+ return __riscv_vandn_mu(vm, vd, vs2, rs1, vl);
 }
-
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c
index f8c14b057..a9c542556 100644
--- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c
@@ -1,366 +1,434 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
-// RUN: -target-feature +experimental-zvbb \
-// RUN: -target-feature +experimental-zvbc \
-// RUN: -target-feature +experimental-zvkg \
-// RUN: -target-feature +experimental-zvkned \
-// RUN: -target-feature +experimental-zvknhb \
-// RUN: -target-feature +experimental-zvksed \
-// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \
+// RUN: -target-feature +zvbb \
+// RUN: -target-feature +zvbc \
+// RUN: -target-feature +zvkg \
+// RUN: -target-feature +zvkned \
+// RUN: -target-feature +zvknhb \
+// RUN: -target-feature +zvksed \
+// RUN: -target-feature +zvksh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vbrev_tu(maskedoff, vs2, vl);
+vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vbrev_tu(vd, vs2, vl);
 }

-vuint8mf4_t
test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { 
+ return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, 
vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - 
return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16mf4_t 
test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); 
+vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return 
__riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t 
maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c index 83967d94f..a986b5ece 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c @@ -1,366 +1,434 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } 
-vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return 
__riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t 
maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + 
vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t 
test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - 
return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c index e20f8bff1..11f24f0b9 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c @@ -1,142 +1,189 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature 
+experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return 
__riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return 
__riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, 
vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c index 87cb1377d..f9a4a8af7 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c @@ -1,142 +1,193 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, 
vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t 
test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return 
__riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c index 526c3a33d..5a1670759 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c @@ -1,34 +1,38 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: 
-target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c index c5ba6e721..995625243 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vgmul_tu(vd, vs2, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vgmul_tu(vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c index a723aa6de..62c1e3e1e 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c @@ -1,366 +1,434 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature 
+experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16m4_t 
test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - 
return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32m1_t 
test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t 
vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, 
size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8m4_t 
test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return 
__riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c index aa8ba847d..6617d9830 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c @@ -1,718 +1,926 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, 
+ vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return 
__riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t 
vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t 
test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, 
vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, 
vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t 
test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, 
vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, 
size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + 
return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t 
test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, 
vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t 
test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, 
size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t 
rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t 
vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + 
return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c index e3545be3c..0fb6a2d3f 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c @@ -1,718 +1,926 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature +experimental-zvksed \ -// RUN: -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t 
test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t 
test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, 
vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return 
__riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t 
test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, 
rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t 
vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t 
vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t 
test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, 
size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t 
test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, 
vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + 
vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t 
maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t 
vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c index d7e8bf814..e61e23e6d 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN: -target-feature +experimental-zvbb \ -// RUN: -target-feature +experimental-zvbc \ -// RUN: -target-feature +experimental-zvkg \ -// RUN: -target-feature +experimental-zvkned \ -// RUN: -target-feature +experimental-zvknhb \ -// RUN: -target-feature 
+experimental-zvksed \ -// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN:   -target-feature +zvbb \ +// RUN:   -target-feature +zvbc \ +// RUN:   -target-feature +zvkg \ +// RUN:   -target-feature +zvkned \ +// RUN:   -target-feature +zvknhb \ +// RUN:   -target-feature +zvksed \ +// RUN:   -target-feature +zvksh -disable-O0-optnone \ // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN:   FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, +                                       vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, +                                     vuint32m1_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, +                                     vuint32m2_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, +                                     vuint32m4_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, +                                     vuint32m8_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, +                                     vuint64m1_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, +                                     vuint64m2_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, +                                     vuint64m4_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, +                                     vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c index 0dc6ff651..5ca7969f5 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN:   -target-feature +experimental-zvbb \ -// RUN:   -target-feature +experimental-zvbc \ -// RUN:   -target-feature +experimental-zvkg \ -// RUN:   -target-feature +experimental-zvkned \ -// RUN:   -target-feature +experimental-zvknhb \ -// RUN:   -target-feature +experimental-zvksed \ -// RUN:   -target-feature +experimental-zvksh
-disable-O0-optnone \ +// RUN:   -target-feature +zvbb \ +// RUN:   -target-feature +zvbc \ +// RUN:   -target-feature +zvkg \ +// RUN:   -target-feature +zvkned \ +// RUN:   -target-feature +zvknhb \ +// RUN:   -target-feature +zvksed \ +// RUN:   -target-feature +zvksh -disable-O0-optnone \ // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN:   FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, +                                       vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, +                                     vuint32m1_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, +                                     vuint32m2_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, +                                     vuint32m4_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, +                                     vuint32m8_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, +                                     vuint64m1_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, +                                     vuint64m2_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, +                                     vuint64m4_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, +                                     vuint64m8_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c index b2193b03d..ef3478429 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c @@ -1,50 +1,58 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN:   -target-feature +experimental-zvbb \ -// RUN:   -target-feature +experimental-zvbc \ -// RUN:   -target-feature +experimental-zvkg \ -// RUN:   -target-feature +experimental-zvkned \ -// RUN:   -target-feature +experimental-zvknhb \ -// RUN:   -target-feature +experimental-zvksed \ -// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN:   -target-feature +zvbb \ +// RUN: 
-target-feature +zvbc \ +// RUN:   -target-feature +zvkg \ +// RUN:   -target-feature +zvkned \ +// RUN:   -target-feature +zvknhb \ +// RUN:   -target-feature +zvksed \ +// RUN:   -target-feature +zvksh -disable-O0-optnone \ // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN:   FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, +                                       vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, +                                     vuint32m1_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, +                                     vuint32m2_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, +                                     vuint32m4_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, +                                     vuint32m8_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, +                                     vuint64m1_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, +                                     vuint64m2_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, +                                     vuint64m4_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, +                                     vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c index f32bff343..3bc96a360 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c @@ -1,18 +1,19 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN:   -target-feature +experimental-zvbb \ -// RUN:   -target-feature +experimental-zvbc \ -// RUN:   -target-feature +experimental-zvkg \ -// RUN:   -target-feature +experimental-zvkned \ -// RUN:   -target-feature +experimental-zvknhb \ -// RUN:   -target-feature +experimental-zvksed \ -// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN:   -target-feature +zvbb \ +// RUN:   -target-feature +zvbc \ +// RUN:   -target-feature +zvkg \ +// RUN: 
-target-feature +zvkned \ +// RUN:   -target-feature +zvknhb \ +// RUN:   -target-feature +zvksed \ +// RUN:   -target-feature +zvksh -disable-O0-optnone \ // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN:   FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, +                                     size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } @@ -31,4 +32,3 @@ vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c index 657a2aed2..2fd5ab2ed 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c @@ -1,34 +1,38 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN:   -target-feature +experimental-zvbb \ -// RUN:   -target-feature +experimental-zvbc \ -// RUN:   -target-feature +experimental-zvkg \ -// RUN:   -target-feature +experimental-zvkned \ -// RUN:   -target-feature +experimental-zvknhb \ -// RUN:   -target-feature +experimental-zvksed \ -// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN:   -target-feature +zvbb \ +// RUN:   -target-feature +zvbc \ +// RUN:   -target-feature +zvkg \ +// RUN:   -target-feature +zvkned \ +// RUN:   -target-feature +zvknhb \ +// RUN:   -target-feature +zvksed \ +// RUN:   -target-feature +zvksh -disable-O0-optnone \ // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN:   FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, +                                      vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, +                                    vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, +                                    vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, +                                    vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, +                                    vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } - diff --git 
a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c index f241b53f5..acf15ab27 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c @@ -1,34 +1,34 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN:   -target-feature +experimental-zvbb \ -// RUN:   -target-feature +experimental-zvbc \ -// RUN:   -target-feature +experimental-zvkg \ -// RUN:   -target-feature +experimental-zvkned \ -// RUN:   -target-feature +experimental-zvknhb \ -// RUN:   -target-feature +experimental-zvksed \ -// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN:   -target-feature +zvbb \ +// RUN:   -target-feature +zvbc \ +// RUN:   -target-feature +zvkg \ +// RUN:   -target-feature +zvkned \ +// RUN:   -target-feature +zvknhb \ +// RUN:   -target-feature +zvksed \ +// RUN:   -target-feature +zvksh -disable-O0-optnone \ // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN:   FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, +                                     size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c index 8d08ad373..e8ba1fd59 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c @@ -1,38 +1,44 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN:   -target-feature +experimental-zvbb \ -// RUN:   -target-feature +experimental-zvbc \ -// RUN:   -target-feature +experimental-zvkg \ -// RUN:   -target-feature +experimental-zvkned \ -// RUN:   -target-feature +experimental-zvknhb \ -// RUN:   -target-feature +experimental-zvksed \ -// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN:   -target-feature +zvbb \ +// RUN:   -target-feature +zvbc \ +// RUN:   -target-feature +zvkg \ +// RUN:   -target-feature +zvkned \ +// RUN:   -target-feature +zvknhb \ +// RUN: 
-target-feature +zvksed \ +// RUN:   -target-feature +zvksh -disable-O0-optnone \ // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN:   FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, +                                     size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, +                                            size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, +                                          size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, +                                          size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, +                                          size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, +                                          size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -40,19 +46,23 @@ vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -60,15 +70,18 @@ vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -76,19 +89,16 @@ vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m4_t 
test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, +                                         size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c index c6772c8c7..49b8ee5a0 100644 --- a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c @@ -1,494 +1,650 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ -// RUN:   -target-feature +experimental-zvbb \ -// RUN:   -target-feature +experimental-zvbc \ -// RUN:   -target-feature +experimental-zvkg \ -// RUN:   -target-feature +experimental-zvkned \ -// RUN:   -target-feature +experimental-zvknhb \ -// RUN:   -target-feature +experimental-zvksed \ -// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \ +// RUN:   -target-feature +zvbb \ +// RUN:   -target-feature +zvbc \ +// RUN:   -target-feature +zvkg \ +// RUN:   -target-feature +zvkned \ +// RUN:   -target-feature +zvknhb \ +// RUN:   -target-feature +zvksed \ +// RUN:   -target-feature +zvksh -disable-O0-optnone \ // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN:   FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, +                                     vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, +                                     size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, +                                     vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, +                                     size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, +                                   vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32m2_t 
test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t 
vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, 
vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, 
vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); 
} -vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, 
vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); 
+vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return 
__riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c index 7e2d582b1..1c1f98128 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c @@ -1,86 +1,97 @@ -#include #include +#include -typedef _Float16 float16_t; 
-typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, 
vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdf_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaesdf_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdf_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c index 191885804..2eb7f3517 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c @@ -1,86 +1,97 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + 
size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesdm_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaesdm_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesdm_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c index 2230a962c..bd17e9ddc 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c @@ -1,86 +1,97 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return 
__riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesef_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaesef_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesef_vs_tu(vd, vs2, vl); -} - diff --git 
a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c index f0fff627e..fdbb66b41 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c @@ -1,86 +1,97 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + 
size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesem_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaesem_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vaesem_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c index f531dd6af..8f11194f5 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c @@ -1,26 +1,27 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c index ef989068e..2f71a4d13 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c @@ -1,26 +1,27 @@ 
-#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vaeskf2_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c index b4364cdd2..6687ccb2c 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c @@ -1,66 +1,72 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { - return __riscv_vaesz_tu(vd, vs2, vl); -} - -vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return 
__riscv_vaesz_tu(vd, vs2, vl); } -vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } -vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vaesz_tu(vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c index 1ecbcdfa9..73315e18a 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c @@ -1,710 +1,939 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, 
vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t 
test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, 
vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return 
__riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t 
test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return 
__riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t 
test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, 
vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, 
maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, 
+ size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { 
+ return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return 
__riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } 
-vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t 
test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return 
__riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); 
+vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c index c0c9eb726..b46e2114c 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c @@ -1,358 +1,423 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); 
} -vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_tu(maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, 
maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t 
maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + 
return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, 
maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t 
mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } 
-vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c index c375826e5..03f632695 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c @@ -1,358 +1,423 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return 
__riscv_vbrev8_tu(vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return 
__riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); 
+vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint64m4_t 
test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); 
+vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return 
__riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16m2_t 
test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); } - diff --git 
a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c index 3fe950acd..488ab2300 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c @@ -1,134 +1,178 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t 
vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + 
size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vclmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c index cb04c9935..06a287746 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c @@ -1,134 +1,182 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tu(maskedoff, 
vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, 
rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { - return 
__riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { - return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c index eeb1718a4..a346e788f 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c @@ -1,26 +1,27 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vghsh_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c index a50b7e4a9..fc282ac60 100644 --- 
a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c @@ -1,10 +1,8 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vgmul_tu(vd, vs2, vl); } @@ -23,4 +21,3 @@ vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vgmul_tu(vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c index 55f6bf42e..d56e3555e 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c @@ -1,358 +1,423 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16m1_t 
test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_tu(maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf8_t 
test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - 
return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } 
-vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32mf2_t 
test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t 
maskedoff, vuint8m1_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32m2_t 
test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { - return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c index 8b7154ede..0f9405dcd 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c @@ -1,710 +1,915 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t 
vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, 
rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t 
vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, 
size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t 
vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + 
return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, 
vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t 
test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return 
__riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t 
test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t 
vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t 
test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t 
rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t 
vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t 
vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c index b2856896f..6f97b5a85 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c @@ -1,710 +1,915 @@ -#include <stdint.h> #include <riscv_vector.h> +#include <stdint.h> -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, 
size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, 
vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t 
test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, 
size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return 
__riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t 
vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t 
test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, 
vl); } -vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return 
__riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, 
vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, 
vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m1_t 
test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, 
vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + 
vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, 
vuint64m2_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl) { - return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c index cf1afc07f..eb2435d9c 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c @@ -1,42 +1,47 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t 
test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c index a385bfd49..f657a7901 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c @@ -1,42 +1,47 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, 
vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c index ae5e74fff..349f16c5b 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c @@ -1,42 +1,47 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c index b784b6537..1778de96a 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c @@ -1,10 +1,8 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float 
float32_t; -typedef double float64_t; -vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } @@ -23,4 +21,3 @@ vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm3c_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c index 46ddc2d66..f4536867d 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c @@ -1,26 +1,27 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { - return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c index e1f938477..ac789ac44 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c @@ -1,26 +1,23 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } -vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } 
-vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } -vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } -vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c index bf509ae52..46cf176d3 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c @@ -1,30 +1,33 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -32,19 +35,23 @@ vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { +vuint32m8_t 
test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -52,15 +59,18 @@ vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } @@ -68,19 +78,16 @@ vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } -vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } -vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vsm4r_vs_tu(vd, vs2, vl); } vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { return __riscv_vsm4r_vv_tu(vd, vs2, vl); } - -vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { - return __riscv_vsm4r_vs_tu(vd, vs2, vl); -} - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c index e316013f9..e3736d299 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c @@ -1,486 +1,639 @@ -#include #include +#include -typedef _Float16 float16_t; -typedef float float32_t; -typedef double float64_t; -vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, 
vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, 
vl); } -vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, 
size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, 
vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); 
+vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t 
vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, 
vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t 
test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t 
maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m4_t 
test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, 
vs2, rs1, vl); } -vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl); +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) { - return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl); +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); } - diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md 
b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index a1769992b..bb908fb39 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -1,1038 +1,1045 @@ -## Zvbb - Vector Bit-manipulation used in Cryptography: +=== Zvbb - Vector Bit-manipulation used in Cryptography -### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not -**Prototypes:** -``` C -vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t 
__riscv_vandn_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +[,c] +---- +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); 
+vuint16m8_t __riscv_vandn_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vandn_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); 
-vuint16mf4_t __riscv_vandn_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, 
size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t vm, vuint32m8_t 
vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vandn_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, 
size_t vl); -vuint16m4_t __riscv_vandn_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t vm, 
vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t vm, vuint64m8_t 
vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vandn_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_mu (vbool16_t mask, vuint32m2_t maskedoff, 
vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` +vuint8mf8_t __riscv_vandn_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t vm, 
vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl);
+vuint16m2_t __riscv_vandn_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl);
+vuint16m2_t __riscv_vandn_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl);
+vuint16m4_t __riscv_vandn_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl);
+vuint16m4_t __riscv_vandn_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl);
+vuint16m8_t __riscv_vandn_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl);
+vuint16m8_t __riscv_vandn_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl);
+vuint32mf2_t __riscv_vandn_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32mf2_t __riscv_vandn_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl);
+vuint32m1_t __riscv_vandn_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m1_t __riscv_vandn_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl);
+vuint32m2_t __riscv_vandn_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m2_t __riscv_vandn_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl);
+vuint32m4_t __riscv_vandn_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m4_t __riscv_vandn_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl);
+vuint32m8_t __riscv_vandn_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint32m8_t __riscv_vandn_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl);
+vuint64m1_t __riscv_vandn_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vandn_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vandn_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vandn_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vandn_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vandn_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+----
-### [Vector Bit-manipulation used in Cryptography - Reverse Bits]():
+[[policy-variant-overloaded]]
+==== Vector Basic Bit-manipulation - Reverse Bits in Elements
-**Prototypes:**
-``` C
-vuint8mf8_t __riscv_vbrev_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl);
-vuint8mf4_t __riscv_vbrev_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl);
-vuint8mf2_t __riscv_vbrev_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl);
-vuint8m1_t __riscv_vbrev_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl);
-vuint8m2_t __riscv_vbrev_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl);
-vuint8m4_t __riscv_vbrev_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl);
-vuint8m8_t __riscv_vbrev_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl);
-vuint16mf4_t __riscv_vbrev_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl);
-vuint16mf2_t __riscv_vbrev_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl);
-vuint16m1_t __riscv_vbrev_tu 
(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); 
-vuint16mf2_t __riscv_vrev8_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +[,c] +---- +vuint8mf8_t __riscv_vbrev_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t 
__riscv_vbrev8_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vbrev_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t 
vs2, size_t vl); -vuint16m1_t __riscv_vbrev_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_tum 
(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tum (vbool16_t 
vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tum (vbool8_t vm, 
vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vbrev_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_tumu (vbool16_t mask, vuint8mf2_t maskedoff, 
vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t 
vl); -vuint32m4_t __riscv_vrev8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t 
__riscv_vbrev8_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); // masked functions -vuint8mf8_t __riscv_vbrev_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_mu 
(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t 
__riscv_vbrev8_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -``` +vuint8mf8_t __riscv_vbrev_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t 
__riscv_vbrev_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_mu (vbool1_t vm, vuint8m8_t vd, 
vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vrev8_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vrev8_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vrev8_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vrev8_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vrev8_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vrev8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vrev8_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vrev8_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vrev8_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vrev8_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vrev8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vrev8_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vrev8_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vrev8_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+----
-### [Vector Bit-manipulation used in Cryptography - Count Bits]():
-This operation don't have Policy Intrinsic Functions.
+[[policy-variant-overloaded]]
+==== Vector Basic Bit-manipulation - Count Bits
+Intrinsics here don't have a policy variant.
-### [Vector Bit-manipulation used in Cryptography - Rotate]():
+[[policy-variant-overloaded]]
+==== Vector Bit-manipulation used in Cryptography - Rotate
-**Prototypes:**
-``` C
-vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl);
-vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl);
-vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl);
-vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl);
-vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl);
-vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl);
-vuint8m1_t __riscv_vrol_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl);
-vuint8m1_t __riscv_vrol_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl);
-vuint8m2_t __riscv_vrol_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl);
-vuint8m2_t __riscv_vrol_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl);
-vuint8m4_t __riscv_vrol_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl);
-vuint8m4_t __riscv_vrol_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl);
-vuint8m8_t __riscv_vrol_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl);
-vuint8m8_t __riscv_vrol_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl);
-vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl);
-vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl);
-vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl);
-vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl);
-vuint16m1_t
__riscv_vrol_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); 
-vuint8m4_t __riscv_vror_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +[,c] +---- +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, 
size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t vd, 
vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t 
__riscv_vror_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vrol_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_tum (vbool64_t mask, 
vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); 
-vuint16mf4_t __riscv_vror_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t vm, 
vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t vm, vuint64m1_t vd, 
vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, 
vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vrol_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_tumu (vbool64_t 
mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_tumu (vbool32_t 
mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_tumu (vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t vm, vuint16m4_t vd, 
vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t vm, vuint8m8_t vd, 
vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); // masked functions -vuint8mf8_t __riscv_vrol_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, 
size_t vl); -vuint8mf4_t __riscv_vrol_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_mu (vbool8_t mask, vuint32m4_t maskedoff, 
vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_mu (vbool8_t mask, 
vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -``` +vuint8mf8_t __riscv_vrol_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t 
vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t 
vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, 
size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +---- -### [Vector Bit-manipulation used in Cryptography - Shift](): +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation used - Widening Shift -**Prototypes:** -``` C -vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t 
vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +[,c] +---- +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, 
vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t 
vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t 
maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t 
vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); // masked functions -vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, 
vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -``` +vuint16mf4_t __riscv_vwsll_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t 
vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +---- -## Zvbc - Vector Carryless Multiplication: +=== Zvbc - Vector Carryless Multiplication -### [Vector Carryless Multiplication](): +[[policy-variant-overloaded]] +==== Vector Carryless Multiplication -**Prototypes:** -``` C -vuint64m1_t __riscv_vclmul_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t 
__riscv_vclmulh_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +[,c] +---- +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint64m1_t __riscv_vclmul_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmul_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, 
vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint64m1_t __riscv_vclmul_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); 
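// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch only, not part of the generated
// diff above or below. It shows how the overloaded _tumu policy variants of
// the Zvbc carryless-multiply intrinsics listed in this hunk might be used,
// assuming a toolchain that implements the v1.0 RVV intrinsics plus the Zvbc
// extension (e.g. something like -march=rv64gcv_zvbc). The helper name
// clmul_nonzero_inplace and the poly argument are made up for illustration;
// only the __riscv_* calls correspond to prototypes documented here.

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Carryless-multiply every non-zero element of buf[] by poly (low 64 bits of
// the GF(2)[x] product). With the _tumu policy, masked-off lanes (the zero
// elements) and tail lanes keep the value of the destination operand, so the
// zeros and any elements beyond vl are left undisturbed.
void clmul_nonzero_inplace(uint64_t *buf, size_t n, uint64_t poly) {
  for (size_t done = 0; done < n;) {
    size_t vl = __riscv_vsetvl_e64m1(n - done);
    vuint64m1_t v = __riscv_vle64_v_u64m1(buf + done, vl);
    // Mask of lanes whose element is non-zero.
    vbool64_t nz = __riscv_vmsne_vx_u64m1_b64(v, 0, vl);
    // Overloaded policy variant: the result type is deduced from the
    // operands; vm = nz, vd = v (merge source), vs2 = v, rs1 = poly.
    v = __riscv_vclmul_tumu(nz, v, v, poly, vl);
    __riscv_vse64_v_u64m1(buf + done, v, vl);
    done += vl;
  }
}
// End of editor's note; the diff hunk continues below.
// ---------------------------------------------------------------------------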
+vuint64m1_t __riscv_vclmul_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); // masked functions -vuint64m1_t __riscv_vclmul_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, 
vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` +vuint64m1_t __riscv_vclmul_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- -## Zvkg - Vector GCM/GMAC: +=== Zvkg - Vector GCM/GMAC -### [Vector GCM/GMAC](): +[[policy-variant-overloaded]] +==== Vector GCM/GMAC -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vghsh_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vghsh_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vghsh_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -1043,14 +1050,15 @@ vuint32m1_t __riscv_vgmul_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vgmul_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvkned - NIST Suite: Vector AES Block Cipher: +=== Zvkned - NIST Suite: Vector AES Block Cipher -### [Vector AES Encryption](): +[[policy-variant-overloaded]] +==== Vector AES Encryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesef_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1070,7 +1078,6 @@ vuint32m4_t __riscv_vaesef_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t 
__riscv_vaesem_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1090,13 +1097,13 @@ vuint32m4_t __riscv_vaesem_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES Decryption](): +[[policy-variant-overloaded]] +==== Vector AES Decryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesdf_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1116,7 +1123,6 @@ vuint32m4_t __riscv_vaesdf_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1136,29 +1142,30 @@ vuint32m4_t __riscv_vaesdm_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES-128 Forward KeySchedule generation](): +[[policy-variant-overloaded]] +==== Vector AES-128 Forward KeySchedule generation -**Prototypes:** -``` C -vuint32mf2_t __riscv_vaeskf1_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t 
__riscv_vaeskf2_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector AES round zero](): +[[policy-variant-overloaded]] +==== Vector AES round zero -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); @@ -1173,15 +1180,15 @@ vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash -### [Vector SHA-2 message schedule](): +[[policy-variant-overloaded]] +==== Vector SHA-2 message schedule -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ms_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ms_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ms_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -1191,12 +1198,13 @@ vuint64m1_t __riscv_vsha2ms_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1 vuint64m2_t __riscv_vsha2ms_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2ms_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2ms_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -### [Vector SHA-2 two rounds of compression](): +[[policy-variant-overloaded]] +==== Vector SHA-2 two rounds of compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ch_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ch_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ch_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -1215,25 +1223,27 @@ vuint64m1_t __riscv_vsha2cl_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1 vuint64m2_t __riscv_vsha2cl_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2cl_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -## Zvksed - ShangMi Suite: SM4 Block Cipher: +=== Zvksed - ShangMi Suite: SM4 Block Cipher -### [Vector SM4 KeyExpansion](): +[[policy-variant-overloaded]] +==== Vector SM4 KeyExpansion -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm4k_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vsm4k_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vsm4k_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vsm4k_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +[,c] +---- +vuint32mf2_t __riscv_vsm4k_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_tu 
(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- -### [Vector SM4 Rounds](): +[[policy-variant-overloaded]] +==== Vector SM4 Rounds -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -1253,29 +1263,30 @@ vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -## Zvksh - ShangMi Suite: SM3 Secure Hash: +=== Zvksh - ShangMi Suite: SM3 Secure Hash -### [Vector SM3 Message Expansion](): +[[policy-variant-overloaded]] +==== Vector SM3 Message Expansion -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm3me_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vsm3me_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m2_t __riscv_vsm3me_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m4_t __riscv_vsm3me_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m8_t __riscv_vsm3me_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -``` +[,c] +---- +vuint32mf2_t __riscv_vsm3me_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- -### [Vector SM3 Message Expansion](): +[[policy-variant-overloaded]] +==== Vector SM3 Compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm3c_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vsm3c_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vsm3c_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vsm3c_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vsm3c_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..1ad1d2345 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,958 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t vd, 
vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t 
vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t vm, vuint32m1_t 
vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t 
vs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); 
+vuint8m2_t __riscv_vandn_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t 
rs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Reverse Bits in Elements + +[,c] +---- +vuint8mf8_t __riscv_vbrev_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); 
+vuint64m4_t __riscv_vbrev8_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, 
size_t vl); +vuint64m2_t __riscv_vbrev_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t 
vl); +vuint32m1_t __riscv_vrev8_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tumu (vbool32_t vm, vuint16mf2_t 
vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t 
__riscv_vbrev_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_mu (vbool16_t vm, vuint64m4_t vd, 
vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Count Bits +Intrinsics here don't have a policy variant. 
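+
+NOTE: The following is an illustrative sketch, not part of the auto-generated listing. It assumes a compiler that exposes these overloaded policy intrinsics through `<riscv_vector.h>` and an `-march` string enabling Zvbb; the wrapper function names are hypothetical.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Tail-undisturbed (_tu): elements past vl keep the values already in vd.
+// Hypothetical helper; the overloaded call resolves on the vuint8m1_t operands.
+static inline vuint8m1_t andn_keep_tail(vuint8m1_t vd, vuint8m1_t vs2,
+                                        vuint8m1_t vs1, size_t vl) {
+  return __riscv_vandn_tu(vd, vs2, vs1, vl);
+}
+
+// Masked, tail-undisturbed (_tum): vm selects the active elements and the
+// tail keeps vd, matching the prototypes listed above.
+static inline vuint8m1_t brev8_masked_keep_tail(vbool8_t vm, vuint8m1_t vd,
+                                                vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_tum(vm, vd, vs2, vl);
+}
+----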
+ +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t vd, 
vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t 
__riscv_vror_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t vm, vuint16m8_t vd, 
vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, 
size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t vm, vuint8mf2_t vd, 
vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t vm, 
vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tumu 
(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t vm, 
vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); 
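// ---------------------------------------------------------------------------
// Editorial aside (illustrative sketch only, not part of the generated listing
// or of this patch): one way the policy-variant rotate intrinsics above might
// be used, assuming a toolchain that exposes the overloaded Zvbb policy
// intrinsics through <riscv_vector.h>. The function names and the rotate
// amount below are hypothetical.
// ---------------------------------------------------------------------------
#include <riscv_vector.h>

// Tail-undisturbed rotate-left: active elements become vs2 rotated left by
// `amount`; elements past vl keep their previous values from vd.
static inline vuint32m1_t rotl_keep_tail(vuint32m1_t vd, vuint32m1_t vs2,
                                         size_t amount, size_t vl) {
  return __riscv_vrol_tu(vd, vs2, amount, vl);
}

// Tail- and mask-undisturbed rotate-right: only elements selected by vm are
// rotated by the per-element amounts in vs1; tail and inactive elements keep
// their previous values from vd.
static inline vuint32m1_t rotr_masked(vbool32_t vm, vuint32m1_t vd,
                                      vuint32m1_t vs2, vuint32m1_t vs1,
                                      size_t vl) {
  return __riscv_vror_tum(vm, vd, vs2, vs1, vl);
}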
+vuint8m4_t __riscv_vror_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== 
Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t 
vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t vm, 
vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t 
rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md deleted file mode 100644 index 4bcf7ffbd..000000000 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.md +++ /dev/null @@ -1,953 +0,0 @@ - -## Zvbb - Vector Bit-manipulation used in Cryptography: - -### [Vector Bit-manipulation used in Cryptography - Bitwise And-Not](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_tu (vuint8m8_t maskedoff, 
vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vandn_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_tum (vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_tum (vbool4_t mask, vuint32m8_t maskedoff, 
vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vandn_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl); -vuint8m2_t __riscv_vandn_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_tumu 
(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vandn_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vandn_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vandn_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl); -vuint8mf4_t __riscv_vandn_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vandn_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl); -vuint8mf2_t __riscv_vandn_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vandn_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl); -vuint8m1_t __riscv_vandn_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vandn_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, 
size_t vl); -vuint8m2_t __riscv_vandn_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vandn_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl); -vuint8m4_t __riscv_vandn_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vandn_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl); -vuint8m8_t __riscv_vandn_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vandn_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl); -vuint16mf4_t __riscv_vandn_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vandn_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl); -vuint16mf2_t __riscv_vandn_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vandn_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl); -vuint16m1_t __riscv_vandn_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vandn_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl); -vuint16m2_t __riscv_vandn_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vandn_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl); -vuint16m4_t __riscv_vandn_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vandn_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl); -vuint16m8_t __riscv_vandn_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vandn_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl); -vuint32mf2_t __riscv_vandn_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vandn_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl); -vuint32m1_t __riscv_vandn_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vandn_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl); -vuint32m2_t __riscv_vandn_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vandn_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl); -vuint32m4_t __riscv_vandn_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vandn_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl); -vuint32m8_t __riscv_vandn_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vandn_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl); -vuint64m1_t __riscv_vandn_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vandn_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vandn_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); 
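// ---------------------------------------------------------------------------
// Editorial aside (illustrative sketch only, not part of the relocated listing
// or of this patch): a possible use of the overloaded vandn policy intrinsics
// above, assuming a toolchain that exposes them through <riscv_vector.h>. The
// function name and parameters are hypothetical.
// ---------------------------------------------------------------------------
#include <riscv_vector.h>

// Tail-undisturbed and-not: for elements below vl the result is vs2 & ~clear,
// i.e. the bits set in `clear` are removed from each element of vs2; elements
// past vl keep their previous values from maskedoff.
static inline vuint64m1_t clear_bits_tu(vuint64m1_t maskedoff, vuint64m1_t vs2,
                                        uint64_t clear, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, clear, vl);
}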
-vuint64m2_t __riscv_vandn_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vandn_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vandn_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vandn_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vandn_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Reverse Bits](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vbrev_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_tu (vuint16m8_t maskedoff, 
vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vbrev_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_tum 
(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_tum (vbool64_t mask, vuint8mf8_t maskedoff, 
vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vbrev_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t 
__riscv_vbrev_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_tumu 
(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -// masked functions -vuint8mf8_t __riscv_vbrev_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t 
vs2, size_t vl); -vuint64m2_t __riscv_vbrev_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vbrev8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vbrev8_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vbrev8_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vbrev8_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vbrev8_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vbrev8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vbrev8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vbrev8_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vbrev8_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vbrev8_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vbrev8_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vbrev8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vbrev8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vbrev8_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vbrev8_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vbrev8_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vbrev8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vbrev8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vbrev8_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vbrev8_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vbrev8_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vbrev8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -vuint8mf8_t __riscv_vrev8_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl); -vuint8mf4_t __riscv_vrev8_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl); -vuint8mf2_t __riscv_vrev8_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl); -vuint8m1_t __riscv_vrev8_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl); -vuint8m2_t __riscv_vrev8_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl); -vuint8m4_t __riscv_vrev8_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl); -vuint8m8_t __riscv_vrev8_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl); -vuint16mf4_t __riscv_vrev8_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl); -vuint16mf2_t __riscv_vrev8_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl); -vuint16m1_t __riscv_vrev8_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl); -vuint16m2_t __riscv_vrev8_mu (vbool8_t mask, vuint16m2_t maskedoff, 
vuint16m2_t vs2, size_t vl); -vuint16m4_t __riscv_vrev8_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl); -vuint16m8_t __riscv_vrev8_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl); -vuint32mf2_t __riscv_vrev8_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl); -vuint32m1_t __riscv_vrev8_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl); -vuint32m2_t __riscv_vrev8_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl); -vuint32m4_t __riscv_vrev8_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vrev8_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl); -vuint64m1_t __riscv_vrev8_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl); -vuint64m2_t __riscv_vrev8_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl); -vuint64m4_t __riscv_vrev8_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl); -vuint64m8_t __riscv_vrev8_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Count Bits](): -This operation don't have Policy Intrinsic Functions. - -### [Vector Bit-manipulation used in Cryptography - Rotate](): - -**Prototypes:** -``` C -vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); 
-vuint16m4_t __riscv_vrol_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_tu (vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_tu (vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_tu (vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_tu (vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_tu (vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_tu (vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_tu (vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_tu (vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); 
-vuint16mf2_t __riscv_vror_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_tu (vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_tu (vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_tu (vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_tu (vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_tu (vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vrol_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t 
__riscv_vrol_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_tum (vbool32_t mask, vuint64m2_t maskedoff, 
vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_tum (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_tum (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_tum (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_tum (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_tum (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_tum (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t 
__riscv_vror_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vrol_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_tumu (vbool1_t 
mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vrol_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_tumu (vbool64_t 
mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_tumu (vbool16_t mask, vuint32m2_t 
maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -// masked functions -vuint8mf8_t __riscv_vrol_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vrol_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vrol_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vrol_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vrol_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vrol_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vrol_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vrol_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vrol_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vrol_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vrol_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vrol_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vrol_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vrol_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vrol_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vrol_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vrol_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vrol_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vrol_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t 
vl); -vuint16m1_t __riscv_vrol_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vrol_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vrol_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vrol_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vrol_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vrol_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vrol_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vrol_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vrol_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vrol_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vrol_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vrol_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vrol_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vrol_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vrol_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vrol_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vrol_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vrol_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vrol_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vrol_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vrol_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vrol_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vrol_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vrol_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vrol_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -vuint8mf8_t __riscv_vror_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint8mf8_t __riscv_vror_mu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint8mf4_t __riscv_vror_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint8mf4_t __riscv_vror_mu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint8mf2_t __riscv_vror_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint8mf2_t __riscv_vror_mu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint8m1_t __riscv_vror_mu (vbool8_t mask, vuint8m1_t maskedoff, 
vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint8m1_t __riscv_vror_mu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint8m2_t __riscv_vror_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint8m2_t __riscv_vror_mu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint8m4_t __riscv_vror_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint8m4_t __riscv_vror_mu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint8m8_t __riscv_vror_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); -vuint8m8_t __riscv_vror_mu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t rs1, size_t vl); -vuint16mf4_t __riscv_vror_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint16mf4_t __riscv_vror_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vror_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint16mf2_t __riscv_vror_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vror_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint16m1_t __riscv_vror_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vror_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint16m2_t __riscv_vror_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vror_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint16m4_t __riscv_vror_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vror_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); -vuint16m8_t __riscv_vror_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vror_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32mf2_t __riscv_vror_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vror_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m1_t __riscv_vror_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vror_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m2_t __riscv_vror_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vror_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m4_t __riscv_vror_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vror_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -vuint32m8_t __riscv_vror_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vror_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vror_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vror_mu (vbool32_t mask, 
vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vror_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vror_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vror_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vror_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t rs1, size_t vl); -``` - -### [Vector Bit-manipulation used in Cryptography - Shift](): - -**Prototypes:** -``` C -vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tu (vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tu (vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tu (vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tu (vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tu (vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tu (vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tu (vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tu (vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tu (vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tu (vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tu (vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, 
size_t vl); -vuint64m8_t __riscv_vwsll_tu (vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -// masked functions -vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tum (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tum (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tum (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tum (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tum (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tum (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tum (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tum (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tum (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, 
size_t vl); -// masked functions -vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -// masked functions -vuint16mf4_t __riscv_vwsll_mu 
(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); -vuint16mf4_t __riscv_vwsll_mu (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl); -vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); -vuint16mf2_t __riscv_vwsll_mu (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t vs2, size_t rs1, size_t vl); -vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); -vuint16m1_t __riscv_vwsll_mu (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl); -vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); -vuint16m2_t __riscv_vwsll_mu (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl); -vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); -vuint16m4_t __riscv_vwsll_mu (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl); -vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); -vuint16m8_t __riscv_vwsll_mu (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t vs2, size_t rs1, size_t vl); -vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); -vuint32mf2_t __riscv_vwsll_mu (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl); -vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vwsll_mu (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl); -vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); -vuint32m2_t __riscv_vwsll_mu (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl); -vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); -vuint32m4_t __riscv_vwsll_mu (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl); -vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); -vuint32m8_t __riscv_vwsll_mu (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t vs2, size_t rs1, size_t vl); -vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint64m1_t __riscv_vwsll_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl); -vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint64m2_t __riscv_vwsll_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl); -vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint64m4_t __riscv_vwsll_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl); -vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint64m8_t __riscv_vwsll_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl); -``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc 
b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..98ab2a820 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,76 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-overloaded]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, 
vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu 
(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md deleted file mode 100644 index 6d12267b2..000000000 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.md +++ /dev/null @@ -1,75 +0,0 @@ - -## Zvbc - Vector Carryless Multiplication: - -### [Vector Carryless Multiplication](): - -**Prototypes:** -``` C -vuint64m1_t __riscv_vclmul_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint64m1_t __riscv_vclmul_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); 
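As a usage sketch for the Zvbc policy intrinsics listed above (only the intrinsic signatures come from the generated listing; the helper name and operand roles are illustrative assumptions): `vclmul_tu` yields the low 64 bits and `vclmulh_tu` the high 64 bits of each element-wise carryless product, with tail elements taken from the `vd` operands.

``` c
#include <riscv_vector.h>

// Sketch: full 128-bit carryless product per u64 element under the
// tail-undisturbed (_tu) policy; vd_lo/vd_hi supply the tail elements.
static inline void clmul_full_tu(vuint64m1_t vd_lo, vuint64m1_t vd_hi,
                                 vuint64m1_t a, vuint64m1_t b, size_t vl,
                                 vuint64m1_t *lo, vuint64m1_t *hi) {
  *lo = __riscv_vclmul_tu(vd_lo, a, b, vl);  // low 64 bits of a[i] clmul b[i]
  *hi = __riscv_vclmulh_tu(vd_hi, a, b, vl); // high 64 bits of a[i] clmul b[i]
}
```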
-vuint64m2_t __riscv_vclmulh_tum (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tum (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tum (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint64m1_t __riscv_vclmul_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmulh_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -// masked functions -vuint64m1_t __riscv_vclmul_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); -vuint64m1_t __riscv_vclmul_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmul_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmul_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmul_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmul_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmul_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmul_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -vuint64m1_t __riscv_vclmulh_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); 
-vuint64m1_t __riscv_vclmulh_mu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl); -vuint64m2_t __riscv_vclmulh_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); -vuint64m2_t __riscv_vclmulh_mu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl); -vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); -vuint64m4_t __riscv_vclmulh_mu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl); -vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -vuint64m8_t __riscv_vclmulh_mu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl); -``` diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc similarity index 90% rename from auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md rename to auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc index 0f44b8ea2..36e253baf 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -1,10 +1,11 @@ -## Zvkg - Vector GCM/GMAC: +=== Zvkg - Vector GCM/GMAC -### [Vector GCM/GMAC](): +[[policy-variant-overloaded]] +==== Vector GCM/GMAC -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vghsh_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vghsh_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vghsh_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -15,4 +16,4 @@ vuint32m1_t __riscv_vgmul_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); vuint32m2_t __riscv_vgmul_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vgmul_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc similarity index 86% rename from auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md rename to auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc index 6b16a5a48..46b66b36f 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -1,10 +1,11 @@ -## Zvkned - NIST Suite: Vector AES Block Cipher: +=== Zvkned - NIST Suite: Vector AES Block Cipher -### [Vector AES Encryption](): +[[policy-variant-overloaded]] +==== Vector AES Encryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesef_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesef_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); 
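For the Zvkned round intrinsics in this listing, a minimal sketch (assuming VLEN lets `vuint32m4_t` hold whole 4-word AES state groups and that each group already carries its round key; the helper name and round numbering are illustrative): `vaesem_vv_tu` applies one middle encryption round and `vaesef_vv_tu` the final round, both leaving tail elements of `vd` undisturbed.

``` c
#include <riscv_vector.h>

// Sketch: last middle round (rk9) and final round (rk10) of AES-128
// encryption over packed 128-bit state groups; vl counts 32-bit elements
// (4 per AES block).
static inline vuint32m4_t aes128_last_rounds_tu(vuint32m4_t state,
                                                vuint32m4_t rk9,
                                                vuint32m4_t rk10, size_t vl) {
  state = __riscv_vaesem_vv_tu(state, rk9, vl);  // SubBytes, ShiftRows,
                                                 // MixColumns, AddRoundKey
  state = __riscv_vaesef_vv_tu(state, rk10, vl); // final round, no MixColumns
  return state;
}
```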
vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -24,7 +25,6 @@ vuint32m4_t __riscv_vaesef_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesef_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesem_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -44,13 +44,13 @@ vuint32m4_t __riscv_vaesem_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesem_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES Decryption](): +[[policy-variant-overloaded]] +==== Vector AES Decryption -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesdf_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdf_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -70,7 +70,6 @@ vuint32m4_t __riscv_vaesdf_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdf_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vaesdm_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -90,29 +89,30 @@ vuint32m4_t __riscv_vaesdm_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesdm_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- -### [Vector AES-128 Forward KeySchedule generation](): +[[policy-variant-overloaded]] +==== Vector AES-128 Forward KeySchedule generation -**Prototypes:** -``` C -vuint32mf2_t __riscv_vaeskf1_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, 
size_t vl); +vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +---- -### [Vector AES round zero](): +[[policy-variant-overloaded]] +==== Vector AES round zero -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); @@ -127,5 +127,4 @@ vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); -vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc similarity index 92% rename from auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md rename to auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc index 7f060208e..118223db5 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -1,10 +1,11 @@ -## Zvknh - NIST Suite: Vector SHA-2 Secure Hash: +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash -### [Vector SHA-2 message schedule](): +[[policy-variant-overloaded]] +==== Vector SHA-2 message schedule -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ms_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ms_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); vuint32m2_t __riscv_vsha2ms_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -14,12 +15,13 @@ vuint64m1_t __riscv_vsha2ms_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1 vuint64m2_t __riscv_vsha2ms_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2ms_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2ms_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- -### [Vector SHA-2 two rounds of compression](): +[[policy-variant-overloaded]] +==== Vector SHA-2 two rounds of compression -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsha2ch_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); vuint32m1_t __riscv_vsha2ch_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); 
vuint32m2_t __riscv_vsha2ch_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); @@ -38,4 +40,4 @@ vuint64m1_t __riscv_vsha2cl_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1 vuint64m2_t __riscv_vsha2cl_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); vuint64m4_t __riscv_vsha2cl_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); vuint64m8_t __riscv_vsha2cl_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc similarity index 67% rename from auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md rename to auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc index 3129cb528..304925935 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -1,21 +1,23 @@ -## Zvksed - ShangMi Suite: SM4 Block Cipher: +=== Zvksed - ShangMi Suite: SM4 Block Cipher -### [Vector SM4 KeyExpansion](): +[[policy-variant-overloaded]] +==== Vector SM4 KeyExpansion -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm4k_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vsm4k_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vsm4k_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vsm4k_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl); -``` +[,c] +---- +vuint32mf2_t __riscv_vsm4k_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- -### [Vector SM4 Rounds](): +[[policy-variant-overloaded]] +==== Vector SM4 Rounds -**Prototypes:** -``` C +[,c] +---- vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); @@ -35,5 +37,4 @@ vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); -``` +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..b907f2879 --- 
/dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,26 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[policy-variant-overloaded]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md deleted file mode 100644 index cb93f408d..000000000 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.md +++ /dev/null @@ -1,24 +0,0 @@ - -## Zvksh - ShangMi Suite: SM3 Secure Hash: - -### [Vector SM3 Message Expansion](): - -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm3me_tu (vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); -vuint32m1_t __riscv_vsm3me_tu (vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); -vuint32m2_t __riscv_vsm3me_tu (vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); -vuint32m4_t __riscv_vsm3me_tu (vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); -vuint32m8_t __riscv_vsm3me_tu (vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); -``` - -### [Vector SM3 Message Expansion](): - -**Prototypes:** -``` C -vuint32mf2_t __riscv_vsm3c_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); -vuint32m1_t __riscv_vsm3c_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); -vuint32m2_t __riscv_vsm3c_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); -vuint32m4_t __riscv_vsm3c_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); -vuint32m8_t __riscv_vsm3c_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); -``` From bb05072c0e34f358a936d68e3018ba73050f1dd3 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Tue, 21 May 2024 09:28:04 -0700 Subject: [PATCH 43/44] Change the description of bit/byte reverse --- auto-generated/vector-crypto/intrinsic_funcs.md | 2 +- .../00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc | 2 +- auto-generated/vector-crypto/overloaded_intrinsic_funcs.md | 2 +- .../00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc | 2 +- auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md | 2 +- .../00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc | 2 +- .../vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md | 2 +- 
.../00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc | 2 +- rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 4b6c01fc4..993aa0ff3 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -98,7 +98,7 @@ vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1 ---- [[]] -==== Vector Basic Bit-manipulation - Reverse Bits in Elements +==== Vector Basic Bit-manipulation - Reverse [,c] ---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index 25b9c4d67..e5ac8dce7 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -98,7 +98,7 @@ vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1 ---- [[]] -==== Vector Basic Bit-manipulation - Reverse Bits in Elements +==== Vector Basic Bit-manipulation - Reverse [,c] ---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 63ef95b6a..00ace0fa3 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -98,7 +98,7 @@ vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl ---- [[overloaded-]] -==== Vector Basic Bit-manipulation - Reverse Bits in Elements +==== Vector Basic Bit-manipulation - Reverse [,c] ---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index 15ebc0022..44331cd43 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -98,7 +98,7 @@ vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl ---- [[overloaded-]] -==== Vector Basic Bit-manipulation - Reverse Bits in Elements +==== Vector Basic Bit-manipulation - Reverse [,c] ---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 9e568e634..80603a138 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -188,7 +188,7 @@ vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t ---- [[policy-variant-]] -==== Vector Basic Bit-manipulation - Reverse Bits in Elements +==== Vector Basic Bit-manipulation - Reverse [,c] ---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index b261c089c..b44053593 100644 --- 
a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -188,7 +188,7 @@ vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t ---- [[policy-variant-]] -==== Vector Basic Bit-manipulation - Reverse Bits in Elements +==== Vector Basic Bit-manipulation - Reverse [,c] ---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index bb908fb39..559a5b80c 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -188,7 +188,7 @@ vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint ---- [[policy-variant-overloaded]] -==== Vector Basic Bit-manipulation - Reverse Bits in Elements +==== Vector Basic Bit-manipulation - Reverse [,c] ---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index 1ad1d2345..eef98e779 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -188,7 +188,7 @@ vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint ---- [[policy-variant-overloaded]] -==== Vector Basic Bit-manipulation - Reverse Bits in Elements +==== Vector Basic Bit-manipulation - Reverse [,c] ---- diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index 210209181..1877e46b9 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -24,7 +24,7 @@ def gen(g): g.function_group( vector_crypto_template, - "Vector Basic Bit-manipulation - Reverse Bits in Elements", + "Vector Basic Bit-manipulation - Reverse", "", # FIXME: We probably have a separate document for vector-crypto ["vbrev", "vbrev8", "vrev8"], UITYPE, From 3374d743f2852eb0f1c32b9e0ace82f102f2752d Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Mon, 3 Jun 2024 22:56:20 -0700 Subject: [PATCH 44/44] Add missing vcpop, vclz, vctz in api tests --- .../vector-crypto/api-testing/vcpop.c | 178 +++++++ .../vector-crypto/intrinsic_funcs.md | 52 +++ ...bit-manipulation_used_in_cryptography.adoc | 52 +++ .../vector-crypto/llvm-api-tests/vcpop.c | 182 ++++++++ .../llvm-overloaded-tests/vcpop.c | 182 ++++++++ .../overloaded-api-testing/vcpop.c | 178 +++++++ .../overloaded_intrinsic_funcs.md | 52 +++ ...bit-manipulation_used_in_cryptography.adoc | 52 +++ .../policy_funcs/api-testing/vclz.c | 354 ++++++++++++++ .../policy_funcs/api-testing/vcpop.c | 354 ++++++++++++++ .../policy_funcs/api-testing/vctz.c | 354 ++++++++++++++ .../policy_funcs/intrinsic_funcs.md | 282 +++++++++++- ...bit-manipulation_used_in_cryptography.adoc | 282 +++++++++++- .../policy_funcs/llvm-api-tests/vclz.c | 365 +++++++++++++++ .../policy_funcs/llvm-api-tests/vcpop.c | 358 +++++++++++++++ 
.../policy_funcs/llvm-api-tests/vctz.c | 365 +++++++++++++++ .../policy_funcs/llvm-overloaded-tests/vclz.c | 434 ++++++++++++++++++ .../llvm-overloaded-tests/vcpop.c | 427 +++++++++++++++++ .../policy_funcs/llvm-overloaded-tests/vctz.c | 434 ++++++++++++++++++ .../overloaded-api-testing/vclz.c | 423 +++++++++++++++++ .../overloaded-api-testing/vcpop.c | 423 +++++++++++++++++ .../overloaded-api-testing/vctz.c | 423 +++++++++++++++++ .../overloaded_intrinsic_funcs.md | 282 +++++++++++- ...bit-manipulation_used_in_cryptography.adoc | 282 +++++++++++- .../rvv_intrinsic_gen/vector_crypto_inst.py | 12 +- 25 files changed, 6777 insertions(+), 5 deletions(-) create mode 100644 auto-generated/vector-crypto/api-testing/vcpop.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vcpop.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c diff --git a/auto-generated/vector-crypto/api-testing/vcpop.c b/auto-generated/vector-crypto/api-testing/vcpop.c new file mode 100644 index 000000000..d3c52d8fd --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vcpop.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4(vs2, vl); +} + 
+vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t 
test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 993aa0ff3..5e8c4df54 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -333,6 +333,58 @@ vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); ---- +[[]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); 
+vuint32m2_t __riscv_vcpop_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + [[]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index e5ac8dce7..be1bbf32e 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -333,6 +333,58 @@ vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); ---- +[[]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); 
+vuint16m1_t __riscv_vcpop_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + [[]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/llvm-api-tests/vcpop.c b/auto-generated/vector-crypto/llvm-api-tests/vcpop.c new file mode 100644 index 000000000..1061c2222 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vcpop.c @@ -0,0 +1,182 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8(vs2, vl); +} + 
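+// The remaining cases cover the 64-bit element widths and then repeat every
+// SEW/LMUL configuration through the masked _m entry points, which take the
+// mask vm as their first argument.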
+vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c new file mode 100644 index 000000000..aee4aff80 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c @@ -0,0 +1,182 @@ 
+// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, 
vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c b/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c new file mode 100644 index 000000000..cf5ec1edd --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t 
vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 00ace0fa3..fe4429338 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -333,6 +333,58 @@ vuint64m4_t __riscv_vctz (vbool16_t vm, vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz (vbool8_t vm, vuint64m8_t vs2, size_t vl); ---- 
+[[overloaded-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + [[overloaded-]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index 44331cd43..c32b967ed 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -333,6 +333,58 @@ 
vuint64m4_t __riscv_vctz (vbool16_t vm, vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz (vbool8_t vm, vuint64m8_t vs2, size_t vl); ---- +[[overloaded-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + [[overloaded-]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c new file mode 100644 index 000000000..6e3e1120f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return 
__riscv_vclz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tum(vm, vd, vs2, vl); 
+} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, 
vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return 
__riscv_vclz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c new file mode 100644 index 000000000..7dbb9b78c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t 
test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tum(vm, vd, vs2, vl); +} + 
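+// Policy-suffix reminder (per the RVV intrinsics policy-variant naming):
+// _tu keeps tail elements from vd, _tum is the masked form of _tu, _tumu
+// additionally keeps inactive (masked-off) elements from vd, and _mu keeps
+// only the inactive elements while leaving the tail agnostic.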
+vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tumu(vm, vd, vs2, vl); +} + 
+vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t 
test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c new file mode 100644 index 000000000..b191067e8 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, 
vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return 
__riscv_vctz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tumu(vm, vd, vs2, 
vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { 
+ return __riscv_vctz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 80603a138..444080442 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -463,7 +463,287 @@ vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t v [[policy-variant-]] ==== Vector Basic Bit-manipulation - Count Bits -Intrinsics here don't have a policy variant. + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t 
__riscv_vctz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t 
__riscv_vctz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t 
__riscv_vclz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t 
__riscv_vclz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t 
__riscv_vcpop_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); 
+vuint64m2_t __riscv_vcpop_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vcpop_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vcpop_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+// masked functions
+vuint8mf8_t __riscv_vcpop_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vcpop_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vcpop_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vcpop_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vcpop_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vcpop_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vcpop_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vcpop_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vcpop_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vcpop_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vcpop_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vcpop_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vcpop_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vcpop_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vcpop_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vcpop_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vcpop_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vcpop_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vcpop_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vcpop_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vcpop_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vcpop_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+// masked functions
+vuint8mf8_t __riscv_vcpop_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vcpop_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vcpop_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vcpop_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vcpop_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vcpop_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vcpop_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vcpop_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vcpop_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vcpop_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vcpop_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vcpop_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vcpop_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vcpop_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vcpop_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vcpop_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vcpop_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vcpop_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vcpop_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vcpop_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vcpop_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vcpop_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+----
 [[policy-variant-]]
 ==== Vector Bit-manipulation used in Cryptography - Rotate
diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
index b44053593..4433b14fb 100644
--- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
+++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
@@ -463,7 +463,287 @@ vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t v
 [[policy-variant-]]
 ==== Vector Basic Bit-manipulation - Count Bits
-Intrinsics here don't have a policy variant.
+ +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tu (vuint64m4_t vd, 
vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t 
__riscv_vctz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t 
__riscv_vctz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vctz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vctz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vctz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vctz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vctz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vctz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vctz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vctz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vctz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vctz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vctz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vctz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vctz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vctz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vctz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vctz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+// masked functions
+vuint8mf8_t __riscv_vclz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vclz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vclz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vclz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vclz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vclz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vclz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vclz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vclz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vclz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vclz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vclz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vclz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vclz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vclz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vclz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vclz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vclz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vclz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vclz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vclz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vclz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+vuint8mf8_t __riscv_vctz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vctz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vctz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vctz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vctz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vctz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vctz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vctz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vctz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vctz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vctz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vctz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vctz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vctz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vctz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vctz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vctz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vctz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vctz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vctz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vctz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vctz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+----
+
+[[policy-variant-]]
+==== Vector Basic Bit-manipulation - Vector Population Count
+
+[,c]
+----
+vuint8mf8_t __riscv_vcpop_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vcpop_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vcpop_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vcpop_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vcpop_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vcpop_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vcpop_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vcpop_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vcpop_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vcpop_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vcpop_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vcpop_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vcpop_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vcpop_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vcpop_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vcpop_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vcpop_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vcpop_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vcpop_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vcpop_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vcpop_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vcpop_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+// masked functions
+vuint8mf8_t __riscv_vcpop_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vcpop_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vcpop_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vcpop_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vcpop_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vcpop_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vcpop_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vcpop_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vcpop_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vcpop_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vcpop_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vcpop_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vcpop_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vcpop_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vcpop_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vcpop_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vcpop_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vcpop_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vcpop_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vcpop_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vcpop_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vcpop_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+// masked functions
+vuint8mf8_t __riscv_vcpop_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vcpop_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vcpop_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vcpop_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vcpop_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vcpop_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vcpop_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vcpop_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vcpop_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vcpop_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vcpop_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vcpop_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vcpop_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vcpop_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vcpop_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vcpop_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vcpop_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vcpop_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vcpop_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vcpop_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vcpop_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vcpop_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+// masked functions
+vuint8mf8_t __riscv_vcpop_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vcpop_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vcpop_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vcpop_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vcpop_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vcpop_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vcpop_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vcpop_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vcpop_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vcpop_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vcpop_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vcpop_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vcpop_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vcpop_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vcpop_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vcpop_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vcpop_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vcpop_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vcpop_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vcpop_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd,
vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- [[policy-variant-]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c new file mode 100644 index 000000000..d9c132cd7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + 
return __riscv_vclz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, 
size_t vl) { + return __riscv_vclz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return 
__riscv_vclz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c new file mode 100644 index 000000000..2f89711dc --- /dev/null +++ 
b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c @@ -0,0 +1,358 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tum(vm, vd, 
vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tumu(vm, vd, vs2, vl); +} + 
+vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl); +} + 
+vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c new file mode 100644 index 000000000..54d7ee887 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t 
test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, 
vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tumu(vm, 
vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t 
test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c new file mode 100644 index 000000000..e93b008a3 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c @@ -0,0 +1,434 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + 
+vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t 
test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, 
vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return 
__riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c new file mode 100644 index 000000000..4eb8efa2b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c @@ -0,0 +1,427 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, 
vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t 
vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + 
vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return 
__riscv_vcpop_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c new file mode 100644 index 000000000..8cecc11d2 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c @@ -0,0 +1,434 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return 
__riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t 
test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, 
vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c new file mode 100644 index 000000000..2d8b78be7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, 
size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, 
vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t 
vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t 
test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c new file mode 100644 index 000000000..10f897107 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) 
{ + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, 
vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t 
test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c new file mode 100644 index 000000000..3e0bce679 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t 
test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return 
__riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + 
+vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, 
vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index 559a5b80c..f2f92ae9f 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -463,7 +463,287 @@ vuint64m8_t __riscv_vrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size [[policy-variant-overloaded]] ==== Vector Basic Bit-manipulation - Count Bits -Intrinsics here don't have a policy variant. 
+ +[,c] +---- +vuint8mf8_t __riscv_vclz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tum (vbool16_t vm, 
vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t 
__riscv_vctz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t 
__riscv_vctz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); 
+vuint16m4_t __riscv_vctz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tum (vbool32_t vm, 
vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); 
+vuint8m1_t __riscv_vcpop_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- [[policy-variant-overloaded]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index eef98e779..a4d961b88 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -463,7 +463,287 @@ vuint64m8_t __riscv_vrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size [[policy-variant-overloaded]] ==== Vector Basic Bit-manipulation - Count Bits -Intrinsics here don't have a policy variant. 
+ +[,c] +---- +vuint8mf8_t __riscv_vclz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tum (vbool16_t vm, 
vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t 
__riscv_vctz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t 
__riscv_vctz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); 
+vuint16m4_t __riscv_vctz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tum (vbool32_t vm, 
vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); 
+vuint8m1_t __riscv_vcpop_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- [[policy-variant-overloaded]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index 1877e46b9..7635912e1 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -40,7 +40,17 @@ def gen(g): UITYPE, SEWS, LMULS, - decorators.has_masking_no_maskedoff) + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation - Vector Population Count", + "", # FIXME: We probably have a separate document for vector-crypto + ["vcpop"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) g.function_group( vector_crypto_template,